/**************************************************************************

Copyright (c) 2001-2005, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* $OpenBSD: if_ixgb.c,v 1.69 2016/04/13 10:34:32 mpi Exp $ */

#include <dev/pci/if_ixgb.h>

#ifdef IXGB_DEBUG
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgb_display_debug_stats = 0;
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/

#define IXGB_DRIVER_VERSION	"6.1.0"

/*********************************************************************
 *  PCI Device ID Table
 *********************************************************************/

const struct pci_matchid ixgb_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_LR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_CX4 },
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
int  ixgb_probe(struct device *, void *, void *);
void ixgb_attach(struct device *, struct device *, void *);
int  ixgb_intr(void *);
void ixgb_start(struct ifnet *);
int  ixgb_ioctl(struct ifnet *, u_long, caddr_t);
void ixgb_watchdog(struct ifnet *);
void ixgb_init(void *);
void ixgb_stop(void *);
void ixgb_media_status(struct ifnet *, struct ifmediareq *);
int  ixgb_media_change(struct ifnet *);
void ixgb_identify_hardware(struct ixgb_softc *);
int  ixgb_allocate_pci_resources(struct ixgb_softc *);
void ixgb_free_pci_resources(struct ixgb_softc *);
void ixgb_local_timer(void *);
int  ixgb_hardware_init(struct ixgb_softc *);
void ixgb_setup_interface(struct ixgb_softc *);
int  ixgb_setup_transmit_structures(struct ixgb_softc *);
void ixgb_initialize_transmit_unit(struct ixgb_softc *);
int  ixgb_setup_receive_structures(struct ixgb_softc *);
void ixgb_initialize_receive_unit(struct ixgb_softc *);
void ixgb_enable_intr(struct ixgb_softc *);
void ixgb_disable_intr(struct ixgb_softc *);
void ixgb_free_transmit_structures(struct ixgb_softc *);
void ixgb_free_receive_structures(struct ixgb_softc *);
void ixgb_update_stats_counters(struct ixgb_softc *);
void ixgb_txeof(struct ixgb_softc *);
int  ixgb_allocate_receive_structures(struct ixgb_softc *);
int  ixgb_allocate_transmit_structures(struct ixgb_softc *);
void ixgb_rxeof(struct ixgb_softc *, int);
void ixgb_receive_checksum(struct ixgb_softc *, struct ixgb_rx_desc *,
    struct mbuf *);
void ixgb_transmit_checksum_setup(struct ixgb_softc *, struct mbuf *,
    u_int8_t *);
void ixgb_set_promisc(struct ixgb_softc *);
void ixgb_set_multi(struct ixgb_softc *);
#ifdef IXGB_DEBUG
void ixgb_print_hw_stats(struct ixgb_softc *);
#endif
void ixgb_update_link_status(struct ixgb_softc *);
int  ixgb_get_buf(struct ixgb_softc *, int, struct mbuf *);
void ixgb_enable_hw_vlans(struct ixgb_softc *);
int  ixgb_encap(struct ixgb_softc *, struct mbuf *);
int  ixgb_dma_malloc(struct ixgb_softc *, bus_size_t,
    struct ixgb_dma_alloc *, int);
void ixgb_dma_free(struct ixgb_softc *, struct ixgb_dma_alloc *);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

struct cfattach ixgb_ca = {
	sizeof(struct ixgb_softc), ixgb_probe, ixgb_attach
};

struct cfdriver ixgb_cd = {
	NULL, "ixgb", DV_IFNET
};

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 0	/* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 0	/* chip will only prefetch if tail is
					 * pushed this many descriptors from
					 * head */
#define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */


/*********************************************************************
 *  Device identification routine
 *
 *  ixgb_probe determines whether the driver should attach to an
 *  adapter, based on the PCI vendor/device ID of that adapter.
 *
 *  return 0 on no match, positive on match
 *********************************************************************/

int
ixgb_probe(struct device *parent, void *match, void *aux)
{
	INIT_DEBUGOUT("ixgb_probe: begin");

	return (pci_matchbyid((struct pci_attach_args *)aux, ixgb_devices,
	    nitems(ixgb_devices)));
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *********************************************************************/

void
ixgb_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct ixgb_softc *sc;
	int             tsize, rsize;

	INIT_DEBUGOUT("ixgb_attach: begin");

	sc = (struct ixgb_softc *)self;
	sc->osdep.ixgb_pa = *pa;

	timeout_set(&sc->timer_handle, ixgb_local_timer, sc);

	/* Determine hardware revision */
	ixgb_identify_hardware(sc);

	/* Parameters (to be read from user) */
	sc->num_tx_desc = IXGB_MAX_TXD;
	sc->num_rx_desc = IXGB_MAX_RXD;
	sc->tx_int_delay = TIDV;
	sc->rx_int_delay = RDTR;
	sc->rx_buffer_len = IXGB_RXBUFFER_2048;

	/*
	 * These parameters control the automatic generation (Tx) of, and
	 * response (Rx) to, Ethernet PAUSE frames.
	 */
	sc->hw.fc.high_water = FCRTH;
	sc->hw.fc.low_water = FCRTL;
	sc->hw.fc.pause_time = FCPAUSE;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.type = FLOW_CONTROL;

	/* Set the max frame size to allow jumbo-sized frames */
	sc->hw.max_frame_size = IXGB_MAX_JUMBO_FRAME_SIZE;

	if (ixgb_allocate_pci_resources(sc))
		goto err_pci;

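	/*
	 * Size each descriptor ring for the maximum descriptor count and
	 * round up to a page multiple so the ring occupies whole pages.
	 */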
	tsize = IXGB_ROUNDUP(sc->num_tx_desc * sizeof(struct ixgb_tx_desc),
	    IXGB_MAX_TXD * sizeof(struct ixgb_tx_desc));
	tsize = IXGB_ROUNDUP(tsize, PAGE_SIZE);

	/* Allocate Transmit Descriptor ring */
	if (ixgb_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate TxDescriptor memory\n",
		       sc->sc_dv.dv_xname);
		goto err_tx_desc;
	}
	sc->tx_desc_base = (struct ixgb_tx_desc *)sc->txdma.dma_vaddr;

	rsize = IXGB_ROUNDUP(sc->num_rx_desc * sizeof(struct ixgb_rx_desc),
	    IXGB_MAX_RXD * sizeof(struct ixgb_rx_desc));
	rsize = IXGB_ROUNDUP(rsize, PAGE_SIZE);

	/* Allocate Receive Descriptor ring */
	if (ixgb_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_rx_desc;
	}
	sc->rx_desc_base = (struct ixgb_rx_desc *)sc->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (ixgb_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		goto err_hw_init;
	}

	/* Setup OS specific network interface */
	ixgb_setup_interface(sc);

	/* Initialize statistics */
	ixgb_clear_hw_cntrs(&sc->hw);
	ixgb_update_stats_counters(sc);
	ixgb_update_link_status(sc);

	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

	INIT_DEBUGOUT("ixgb_attach: end");
	return;

err_hw_init:
	ixgb_dma_free(sc, &sc->rxdma);
err_rx_desc:
	ixgb_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
	ixgb_free_pci_resources(sc);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  ixgb_start is called by the stack to initiate a transmit.
 *  The driver stays in this routine as long as there are packets to
 *  transmit and transmit resources are available. If resources are
 *  not available, the stack is notified and the packet is requeued.
 **********************************************************************/

void
ixgb_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	struct ixgb_softc *sc = ifp->if_softc;
	int		post = 0;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	if (!sc->link_active)
		return;

	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

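	/*
	 * Peek at the head of the send queue; the dequeue is only
	 * committed once the packet has been mapped onto descriptors,
	 * so a full ring can roll the packet back intact.
	 */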
	for (;;) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (ixgb_encap(sc, m_head)) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		ifq_deq_commit(&ifp->if_snd, m_head);

#if NBPFILTER > 0
		/* Send a copy of the frame to the BPF listener */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = IXGB_TX_TIMEOUT;

		post = 1;
	}

	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that the queued frames are available to transmit.
	 */
	if (post)
		IXGB_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgb_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixgb_softc *sc = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
	int		s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
			       "Addr)");
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			ixgb_init(sc);
		break;

	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				ixgb_set_promisc(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					ixgb_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ixgb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->interface_data, command, data);
	}

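	/*
	 * ENETRESET from ether_ioctl() means the multicast filter has
	 * changed; reprogram the hardware filter without a full reinit.
	 */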
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			ixgb_disable_intr(sc);
			ixgb_set_multi(sc);
			ixgb_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

void
ixgb_watchdog(struct ifnet * ifp)
{
	struct ixgb_softc *sc = ifp->if_softc;

	/*
	 * If we are in this routine because of pause frames, then don't
	 * reset the hardware.
	 */
	if (IXGB_READ_REG(&sc->hw, STATUS) & IXGB_STATUS_TXOFF) {
		ifp->if_timer = IXGB_TX_TIMEOUT;
		return;
	}

	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);

	ixgb_init(sc);

	sc->watchdog_events++;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It serves as the init entry
 *  point in the network interface structure, and the driver also
 *  calls it as a hw/sw initialization routine to reach a consistent
 *  state.
 *
 **********************************************************************/

void
ixgb_init(void *arg)
{
	struct ixgb_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	uint32_t temp_reg;
	int s;

	INIT_DEBUGOUT("ixgb_init: begin");

	s = splnet();

	ixgb_stop(sc);

	/* Get the latest MAC address; the user may have set an LAA */
	bcopy(sc->interface_data.ac_enaddr, sc->hw.curr_mac_addr,
	      IXGB_ETH_LENGTH_OF_ADDRESS);

	/* Initialize the hardware */
	if (ixgb_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		splx(s);
		return;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		ixgb_enable_hw_vlans(sc);

	/* Prepare transmit descriptors and buffers */
	if (ixgb_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		       sc->sc_dv.dv_xname);
		ixgb_stop(sc);
		splx(s);
		return;
	}
	ixgb_initialize_transmit_unit(sc);

	/* Setup Multicast table */
	ixgb_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	if (ixgb_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		       sc->sc_dv.dv_xname);
		ixgb_stop(sc);
		splx(s);
		return;
	}
	ixgb_initialize_receive_unit(sc);

	/* Don't lose promiscuous settings */
	ixgb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Enable jumbo frames */
	IXGB_WRITE_REG(&sc->hw, MFRMS,
	    sc->hw.max_frame_size << IXGB_MFRMS_SHIFT);
	temp_reg = IXGB_READ_REG(&sc->hw, CTRL0);
	temp_reg |= IXGB_CTRL0_JFE;
	IXGB_WRITE_REG(&sc->hw, CTRL0, temp_reg);

	timeout_add_sec(&sc->timer_handle, 1);
	ixgb_clear_hw_cntrs(&sc->hw);
	ixgb_enable_intr(sc);

	splx(s);
}

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/

int
ixgb_intr(void *arg)
{
	struct ixgb_softc *sc = arg;
	struct ifnet	*ifp;
	u_int32_t	reg_icr;
	boolean_t	rxdmt0 = FALSE;
	int claimed = 0;

	ifp = &sc->interface_data.ac_if;

	for (;;) {
		reg_icr = IXGB_READ_REG(&sc->hw, ICR);
		if (reg_icr == 0)
			break;

		claimed = 1;

		if (reg_icr & IXGB_INT_RXDMT0)
			rxdmt0 = TRUE;

		if (ifp->if_flags & IFF_RUNNING) {
			ixgb_rxeof(sc, -1);
			ixgb_txeof(sc);
		}

		/* Link status change */
		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
			timeout_del(&sc->timer_handle);
			ixgb_check_for_link(&sc->hw);
			ixgb_update_link_status(sc);
			timeout_add_sec(&sc->timer_handle, 1);
		}

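		/*
		 * Descriptor count fell below the RXDMT0 threshold while
		 * RAIDC gating is in use; masking (IMC) and immediately
		 * unmasking (IMS) the interrupt re-arms it.
		 */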
		if (rxdmt0 && sc->raidc) {
			IXGB_WRITE_REG(&sc->hw, IMC, IXGB_INT_RXDMT0);
			IXGB_WRITE_REG(&sc->hw, IMS, IXGB_INT_RXDMT0);
		}
	}

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		ixgb_start(ifp);

	return (claimed);
}


/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
void
ixgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ixgb_softc *sc = ifp->if_softc;

	INIT_DEBUGOUT("ixgb_media_status: begin");

	ixgb_check_for_link(&sc->hw);
	ixgb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->hw.link_up) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
	    (sc->hw.phy_type == ixgb_phy_type_txn17401))
		ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
	else
		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;

	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
int
ixgb_media_change(struct ifnet * ifp)
{
	struct ixgb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	INIT_DEBUGOUT("ixgb_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgb_encap(struct ixgb_softc *sc, struct mbuf *m_head)
{
	u_int8_t        txd_popts;
	int             i, j, error = 0;
	bus_dmamap_t	map;

	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *current_tx_desc = NULL;

	/*
	 * Force a cleanup if number of TX descriptors available hits the
	 * threshold
	 */
	if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		ixgb_txeof(sc);
		/* Do we now have at least the minimum? */
		if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
			sc->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	tx_buffer = &sc->tx_buffer_area[sc->next_avail_tx_desc];
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf(sc->txtag, map,
				     m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		sc->no_tx_dma_setup++;
		return (error);
	}
	IXGB_KASSERT(map->dm_nsegs != 0, ("ixgb_encap: empty packet"));

	if (map->dm_nsegs > sc->num_tx_desc_avail)
		goto fail;

#ifdef IXGB_CSUM_OFFLOAD
	ixgb_transmit_checksum_setup(sc, m_head, &txd_popts);
#else
	txd_popts = 0;
#endif

	i = sc->next_avail_tx_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		tx_buffer = &sc->tx_buffer_area[i];
		current_tx_desc = &sc->tx_desc_base[i];

		current_tx_desc->buff_addr = htole64(map->dm_segs[j].ds_addr);
		current_tx_desc->cmd_type_len = htole32((sc->txd_cmd | map->dm_segs[j].ds_len));
		current_tx_desc->popts = txd_popts;
		if (++i == sc->num_tx_desc)
			i = 0;

		tx_buffer->m_head = NULL;
	}

	sc->num_tx_desc_avail -= map->dm_nsegs;
	sc->next_avail_tx_desc = i;

	/* Find out if we are in VLAN mode */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the VLAN id */
		current_tx_desc->vlan = htole16(m_head->m_pkthdr.ether_vtag);

		/* Tell hardware to add tag */
		current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_VLE);
	}

	tx_buffer->m_head = m_head;
	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_EOP);

	return (0);

fail:
	sc->no_tx_desc_avail2++;
	bus_dmamap_unload(sc->txtag, map);
	return (ENOBUFS);
}

void
ixgb_set_promisc(struct ixgb_softc *sc)
{

	u_int32_t       reg_rctl;
	struct ifnet   *ifp = &sc->interface_data.ac_if;

	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGB_RCTL_MPE;
		reg_rctl &= ~IXGB_RCTL_UPE;
	} else {
		reg_rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	}
	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/

void
ixgb_set_multi(struct ixgb_softc *sc)
{
	u_int32_t       reg_rctl = 0;
	u_int8_t        mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
	int             mcnt = 0;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	struct arpcom *ac = &sc->interface_data;
	struct ether_multi *enm;
	struct ether_multistep step;

	IOCTL_DEBUGOUT("ixgb_set_multi: begin");

	if (ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
		goto setit;
	}

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;
		bcopy(enm->enm_addrlo, &mta[mcnt*IXGB_ETH_LENGTH_OF_ADDRESS],
		      IXGB_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

setit:
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
		reg_rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
	} else
		ixgb_mc_addr_list_update(&sc->hw, mta, mcnt, 0);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

void
ixgb_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct ixgb_softc *sc = arg;
	int s;

	ifp = &sc->interface_data.ac_if;

	s = splnet();

	ixgb_check_for_link(&sc->hw);
	ixgb_update_link_status(sc);
	ixgb_update_stats_counters(sc);
#ifdef IXGB_DEBUG
	if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING)
		ixgb_print_hw_stats(sc);
#endif

	timeout_add_sec(&sc->timer_handle, 1);

	splx(s);
}

void
ixgb_update_link_status(struct ixgb_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;

	if (sc->hw.link_up) {
		if (!sc->link_active) {
			ifp->if_baudrate = IF_Gbps(10);
			sc->link_active = 1;
			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			if_link_state_change(ifp);
		}
	} else {
		if (sc->link_active) {
			ifp->if_baudrate = 0;
			sc->link_active = 0;
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

void
ixgb_stop(void *arg)
{
	struct ifnet   *ifp;
	struct ixgb_softc *sc = arg;
	ifp = &sc->interface_data.ac_if;

	INIT_DEBUGOUT("ixgb_stop: begin\n");
	ixgb_disable_intr(sc);
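	/* Clear the stopped flag so ixgb_adapter_stop() performs the reset */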
	sc->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&sc->hw);
	timeout_del(&sc->timer_handle);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	ixgb_free_transmit_structures(sc);
	ixgb_free_receive_structures(sc);
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
void
ixgb_identify_hardware(struct ixgb_softc *sc)
{
	u_int32_t	reg;
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;

	/* Make sure our PCI config space has the necessary stuff set */
	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
					    PCI_COMMAND_STATUS_REG);

	/* Save off the information about this board */
	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	sc->hw.revision_id = PCI_REVISION(reg);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
	sc->hw.subsystem_id = PCI_PRODUCT(reg);

	/* Set MacType, etc. based on this PCI info */
	switch (sc->hw.device_id) {
	case IXGB_DEVICE_ID_82597EX:
	case IXGB_DEVICE_ID_82597EX_SR:
	case IXGB_DEVICE_ID_82597EX_LR:
	case IXGB_DEVICE_ID_82597EX_CX4:
		sc->hw.mac_type = ixgb_82597;
		break;
	default:
		INIT_DEBUGOUT1("Unknown device id 0x%x", sc->hw.device_id);
		printf("%s: unsupported device id 0x%x\n",
		    sc->sc_dv.dv_xname, sc->hw.device_id);
	}
}

int
ixgb_allocate_pci_resources(struct ixgb_softc *sc)
{
	int val;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, IXGB_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba is not mem space\n");
		return (ENXIO);
	}
	if (pci_mapreg_map(pa, IXGB_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.ixgb_membase, &sc->osdep.ixgb_memsize, 0)) {
		printf(": cannot find mem space\n");
		return (ENXIO);
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return (ENXIO);
	}

	sc->hw.back = &sc->osdep;

	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, ixgb_intr, sc,
					    sc->sc_dv.dv_xname);
	if (sc->sc_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return (ENXIO);
	}
	printf(": %s", intrstr);

	return (0);
}

void
ixgb_free_pci_resources(struct ixgb_softc *sc)
{
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	if (sc->sc_intrhand)
		pci_intr_disestablish(pc, sc->sc_intrhand);
	sc->sc_intrhand = 0;

	if (sc->osdep.ixgb_membase)
		bus_space_unmap(sc->osdep.mem_bus_space_tag,
		    sc->osdep.mem_bus_space_handle, sc->osdep.ixgb_memsize);
	sc->osdep.ixgb_membase = 0;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
int
ixgb_hardware_init(struct ixgb_softc *sc)
{
	/* Issue a global reset */
	sc->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (!ixgb_validate_eeprom_checksum(&sc->hw)) {
		printf("%s: The EEPROM Checksum Is Not Valid\n",
		       sc->sc_dv.dv_xname);
		return (EIO);
	}
	if (!ixgb_init_hw(&sc->hw)) {
		printf("%s: Hardware Initialization Failed\n",
		       sc->sc_dv.dv_xname);
		return (EIO);
	}
	bcopy(sc->hw.curr_mac_addr, sc->interface_data.ac_enaddr,
	      IXGB_ETH_LENGTH_OF_ADDRESS);

	return (0);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
void
ixgb_setup_interface(struct ixgb_softc *sc)
{
	struct ifnet   *ifp;
	INIT_DEBUGOUT("ixgb_setup_interface: begin");

	ifp = &sc->interface_data.ac_if;
	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgb_ioctl;
	ifp->if_start = ixgb_start;
	ifp->if_watchdog = ixgb_watchdog;
	ifp->if_hardmtu =
		IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN - ETHER_CRC_LEN;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifdef IXGB_CSUM_OFFLOAD
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixgb_media_change,
		     ixgb_media_status);
	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
	    (sc->hw.phy_type == ixgb_phy_type_txn17401)) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR |
		    IFM_FDX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR |
		    IFM_FDX, 0, NULL);
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
}

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
1051 ixgb_dma_malloc(struct ixgb_softc *sc, bus_size_t size,
1052 		struct ixgb_dma_alloc * dma, int mapflags)
1053 {
1054 	int r;
1055 
1056 	dma->dma_tag = sc->osdep.ixgb_pa.pa_dmat;
1057 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1058 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1059 	if (r != 0) {
1060 		printf("%s: ixgb_dma_malloc: bus_dmamap_create failed; "
1061 			"error %u\n", sc->sc_dv.dv_xname, r);
1062 		goto fail_0;
1063 	}
1064 
1065 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1066 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1067 	if (r != 0) {
		printf("%s: ixgb_dma_malloc: bus_dmamem_alloc failed; "
1069 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
1070 			(unsigned long)size, r);
1071 		goto fail_1;
1072 	}
1073 
1074 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1075 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1076 	if (r != 0) {
		printf("%s: ixgb_dma_malloc: bus_dmamem_map failed; "
1078 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
1079 			(unsigned long)size, r);
1080 		goto fail_2;
1081 	}
1082 
1083 	r = bus_dmamap_load(sc->osdep.ixgb_pa.pa_dmat, dma->dma_map,
1084 			    dma->dma_vaddr, size, NULL,
1085 			    mapflags | BUS_DMA_NOWAIT);
1086 	if (r != 0) {
1087 		printf("%s: ixgb_dma_malloc: bus_dmamap_load failed; "
1088 			"error %u\n", sc->sc_dv.dv_xname, r);
1089 		goto fail_3;
1090 	}
1091 
1092 	dma->dma_size = size;
1093 	return (0);
1094 
1095 fail_3:
1096 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1097 fail_2:
1098 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1099 fail_1:
1100 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1101 fail_0:
1102 	dma->dma_map = NULL;
1103 	dma->dma_tag = NULL;
1104 
1105 	return (r);
1106 }
1107 
1108 void
1109 ixgb_dma_free(struct ixgb_softc *sc, struct ixgb_dma_alloc *dma)
1110 {
1111 	if (dma->dma_tag == NULL)
1112 		return;
1113 
1114 	if (dma->dma_map != NULL) {
1115 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1116 		    dma->dma_map->dm_mapsize,
1117 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1118 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1119 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1120 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1121 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1122 	}
1123 }
1124 
1125 /*********************************************************************
1126  *
1127  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1128  *  the information needed to transmit a packet on the wire.
1129  *
1130  **********************************************************************/
1131 int
1132 ixgb_allocate_transmit_structures(struct ixgb_softc *sc)
1133 {
1134 	if (!(sc->tx_buffer_area = mallocarray(sc->num_tx_desc,
1135 	    sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1136 		printf("%s: Unable to allocate tx_buffer memory\n",
1137 		       sc->sc_dv.dv_xname);
1138 		return (ENOMEM);
1139 	}
1140 
1141 	return (0);
1142 }
1143 
1144 /*********************************************************************
1145  *
1146  *  Allocate and initialize transmit structures.
1147  *
1148  **********************************************************************/
1149 int
1150 ixgb_setup_transmit_structures(struct ixgb_softc *sc)
1151 {
1152 	struct	ixgb_buffer *tx_buffer;
1153 	int error, i;
1154 
1155 	if ((error = ixgb_allocate_transmit_structures(sc)) != 0)
1156 		goto fail;
1157 
1158 	bzero((void *)sc->tx_desc_base,
1159 	      (sizeof(struct ixgb_tx_desc)) * sc->num_tx_desc);
1160 
1161 	sc->txtag = sc->osdep.ixgb_pa.pa_dmat;
1162 
1163 	tx_buffer = sc->tx_buffer_area;
1164 	for (i = 0; i < sc->num_tx_desc; i++) {
1165 		error = bus_dmamap_create(sc->txtag, IXGB_MAX_JUMBO_FRAME_SIZE,
1166 			    IXGB_MAX_SCATTER, IXGB_MAX_JUMBO_FRAME_SIZE, 0,
1167 			    BUS_DMA_NOWAIT, &tx_buffer->map);
1168 		if (error != 0) {
1169 			printf("%s: Unable to create TX DMA map\n",
1170 			    sc->sc_dv.dv_xname);
1171 			goto fail;
1172 		}
1173 		tx_buffer++;
1174 	}
1175 
1176 	sc->next_avail_tx_desc = 0;
1177 	sc->oldest_used_tx_desc = 0;
1178 
1179 	/* Set number of descriptors available */
1180 	sc->num_tx_desc_avail = sc->num_tx_desc;
1181 
1182 	/* Set checksum context */
1183 	sc->active_checksum_context = OFFLOAD_NONE;
1184 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
1185 	   sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1186 
1187 	return (0);
1188 
1189 fail:
1190 	ixgb_free_transmit_structures(sc);
1191 	return (error);
1192 }
1193 
1194 /*********************************************************************
1195  *
1196  *  Enable transmit unit.
1197  *
1198  **********************************************************************/
1199 void
1200 ixgb_initialize_transmit_unit(struct ixgb_softc *sc)
1201 {
1202 	u_int32_t       reg_tctl;
1203 	u_int64_t       bus_addr;
1204 
1205 	/* Setup the Base and Length of the Tx Descriptor Ring */
1206 	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
1207 	IXGB_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
1208 	IXGB_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
1209 	IXGB_WRITE_REG(&sc->hw, TDLEN,
1210 		       sc->num_tx_desc *
1211 		       sizeof(struct ixgb_tx_desc));
1212 
1213 	/* Setup the HW Tx Head and Tail descriptor pointers */
1214 	IXGB_WRITE_REG(&sc->hw, TDH, 0);
1215 	IXGB_WRITE_REG(&sc->hw, TDT, 0);
1216 
1217 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
1218 		     IXGB_READ_REG(&sc->hw, TDBAL),
1219 		     IXGB_READ_REG(&sc->hw, TDLEN));
1220 
1221 	IXGB_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
1222 
1223 	/* Program the Transmit Control Register */
1224 	reg_tctl = IXGB_READ_REG(&sc->hw, TCTL);
1225 	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
1226 	IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);
1227 
1228 	/* Setup Transmit Descriptor Settings for this adapter */
1229 	sc->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;
1230 
1231 	if (sc->tx_int_delay > 0)
1232 		sc->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
1233 }
1234 
1235 /*********************************************************************
1236  *
1237  *  Free all transmit related data structures.
1238  *
1239  **********************************************************************/
1240 void
1241 ixgb_free_transmit_structures(struct ixgb_softc *sc)
1242 {
1243 	struct ixgb_buffer *tx_buffer;
1244 	int             i;
1245 
1246 	INIT_DEBUGOUT("free_transmit_structures: begin");
1247 
1248 	if (sc->tx_buffer_area != NULL) {
1249 		tx_buffer = sc->tx_buffer_area;
1250 		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
1251 			if (tx_buffer->map != NULL &&
1252 			    tx_buffer->map->dm_nsegs > 0) {
1253 				bus_dmamap_sync(sc->txtag, tx_buffer->map,
1254 				    0, tx_buffer->map->dm_mapsize,
1255 				    BUS_DMASYNC_POSTWRITE);
1256 				bus_dmamap_unload(sc->txtag,
1257 				    tx_buffer->map);
1258 			}
1259 
1260 			if (tx_buffer->m_head != NULL) {
1261 				m_freem(tx_buffer->m_head);
1262 				tx_buffer->m_head = NULL;
1263 			}
1264 			if (tx_buffer->map != NULL) {
1265 				bus_dmamap_destroy(sc->txtag,
1266 				    tx_buffer->map);
1267 				tx_buffer->map = NULL;
1268 			}
1269 		}
1270 	}
1271 	if (sc->tx_buffer_area != NULL) {
1272 		free(sc->tx_buffer_area, M_DEVBUF, 0);
1273 		sc->tx_buffer_area = NULL;
1274 	}
1275 	if (sc->txtag != NULL) {
1276 		sc->txtag = NULL;
1277 	}
1278 }
1279 
1280 /*********************************************************************
1281  *
1282  *  The offload context needs to be set when we transfer the first
1283  *  packet of a particular protocol (TCP/UDP). We change the
1284  *  context only if the protocol type changes.
1285  *
1286  **********************************************************************/
1287 void
1288 ixgb_transmit_checksum_setup(struct ixgb_softc *sc,
1289 			     struct mbuf *mp,
1290 			     u_int8_t *txd_popts)
1291 {
1292 	struct ixgb_context_desc *TXD;
1293 	struct ixgb_buffer *tx_buffer;
1294 	int             curr_txd;
1295 
1296 	if (mp->m_pkthdr.csum_flags) {
1297 
1298 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
1299 			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1300 			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
1301 				return;
1302 			else
1303 				sc->active_checksum_context = OFFLOAD_TCP_IP;
1304 
1305 		} else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
1306 			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
1307 			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
1308 				return;
1309 			else
1310 				sc->active_checksum_context = OFFLOAD_UDP_IP;
1311 		} else {
1312 			*txd_popts = 0;
1313 			return;
1314 		}
1315 	} else {
1316 		*txd_popts = 0;
1317 		return;
1318 	}
1319 
1320 	/*
1321 	 * If we reach this point, the checksum offload context needs to be
1322 	 * reset.
1323 	 */
1324 	curr_txd = sc->next_avail_tx_desc;
1325 	tx_buffer = &sc->tx_buffer_area[curr_txd];
1326 	TXD = (struct ixgb_context_desc *) & sc->tx_desc_base[curr_txd];
1327 
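	/*
	 * tucss/tucso/tucse are byte offsets into the frame: where the
	 * TCP/UDP checksum calculation starts, where the result is
	 * stored, and where it ends (0 means checksum to end of packet).
	 */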
	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
	TXD->tucse = 0;

	TXD->mss = 0;

	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}
	TXD->cmd_type_len = htole32(IXGB_CONTEXT_DESC_CMD_TCP |
	    IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE);

	tx_buffer->m_head = NULL;

	if (++curr_txd == sc->num_tx_desc)
		curr_txd = 0;

	sc->num_tx_desc_avail--;
	sc->next_avail_tx_desc = curr_txd;
}

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
void
ixgb_txeof(struct ixgb_softc *sc)
{
	int             i, num_avail;
	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *tx_desc;
	struct ifnet	*ifp = &sc->interface_data.ac_if;

	if (sc->num_tx_desc_avail == sc->num_tx_desc)
		return;

	num_avail = sc->num_tx_desc_avail;
	i = sc->oldest_used_tx_desc;

	tx_buffer = &sc->tx_buffer_area[i];
	tx_desc = &sc->tx_desc_base[i];

	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {

		tx_desc->status = 0;
		num_avail++;

		if (tx_buffer->m_head != NULL) {
			ifp->if_opackets++;

			if (tx_buffer->map->dm_nsegs > 0) {
				bus_dmamap_sync(sc->txtag, tx_buffer->map,
				    0, tx_buffer->map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->txtag, tx_buffer->map);
			}

			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (++i == sc->num_tx_desc)
			i = 0;

		tx_buffer = &sc->tx_buffer_area[i];
		tx_desc = &sc->tx_desc_base[i];
	}
	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear the OACTIVE flag to tell the
	 * stack that it is OK to send packets. If there are no pending
	 * descriptors, clear the timeout. Otherwise, if some descriptors
	 * have been freed, restart the timeout.
	 */
	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD)
		ifq_clr_oactive(&ifp->if_snd);

	/* All clean, turn off the timer */
	if (num_avail == sc->num_tx_desc)
		ifp->if_timer = 0;
	/* Some cleaned, reset the timer */
	else if (num_avail != sc->num_tx_desc_avail)
		ifp->if_timer = IXGB_TX_TIMEOUT;

	sc->num_tx_desc_avail = num_avail;
}


/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
 *
 **********************************************************************/
int
ixgb_get_buf(struct ixgb_softc *sc, int i,
	     struct mbuf *nmp)
{
	struct mbuf *mp = nmp;
	struct ixgb_buffer *rx_buffer;
	int             error;

	if (mp == NULL) {
		MGETHDR(mp, M_DONTWAIT, MT_DATA);
		if (mp == NULL) {
			sc->mbuf_alloc_failed++;
			return (ENOBUFS);
		}
		MCLGET(mp, M_DONTWAIT);
		if ((mp->m_flags & M_EXT) == 0) {
			m_freem(mp);
			sc->mbuf_cluster_failed++;
			return (ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

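	/*
	 * When the whole frame fits in one cluster, offset the data by
	 * ETHER_ALIGN (2 bytes) so the IP header lands on a longword
	 * boundary after the 14-byte Ethernet header.
	 */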
	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
		m_adj(mp, ETHER_ALIGN);

	rx_buffer = &sc->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf(sc->rxtag, rx_buffer->map,
	    mp, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(mp);
		return (error);
	}
	rx_buffer->m_head = mp;
	bzero(&sc->rx_desc_base[i], sizeof(sc->rx_desc_base[i]));
	sc->rx_desc_base[i].buff_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
	bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
	    rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
int
ixgb_allocate_receive_structures(struct ixgb_softc *sc)
{
	int             i, error;
	struct ixgb_buffer *rx_buffer;

	if (!(sc->rx_buffer_area = mallocarray(sc->num_rx_desc,
	    sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
		printf("%s: Unable to allocate rx_buffer memory\n",
		       sc->sc_dv.dv_xname);
		return (ENOMEM);
	}

	sc->rxtag = sc->osdep.ixgb_pa.pa_dmat;

	rx_buffer = sc->rx_buffer_area;
	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
					  MCLBYTES, 0, BUS_DMA_NOWAIT,
					  &rx_buffer->map);
		if (error != 0) {
			printf("%s: ixgb_allocate_receive_structures: "
			       "bus_dmamap_create failed; error %u\n",
			       sc->sc_dv.dv_xname, error);
			goto fail;
		}
	}

	for (i = 0; i < sc->num_rx_desc; i++) {
		error = ixgb_get_buf(sc, i, NULL);
		if (error != 0)
			goto fail;
	}
	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
	    sc->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail:
	ixgb_free_receive_structures(sc);
	return (error);
}

/*********************************************************************
 *
 *  Allocate and initialize receive structures.
 *
 **********************************************************************/
int
ixgb_setup_receive_structures(struct ixgb_softc *sc)
{
	bzero((void *)sc->rx_desc_base,
	      (sizeof(struct ixgb_rx_desc)) * sc->num_rx_desc);

	if (ixgb_allocate_receive_structures(sc))
		return (ENOMEM);

	/* Setup our descriptor pointers */
	sc->next_rx_desc_to_check = 0;
	sc->next_rx_desc_to_use = 0;
	return (0);
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
void
ixgb_initialize_receive_unit(struct ixgb_softc *sc)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	u_int32_t       reg_rxdctl;
	u_int64_t       bus_addr;

	/*
	 * Make sure receives are disabled while setting up the descriptor
	 * ring
	 */
	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

	/* Set the Receive Delay Timer Register */
	IXGB_WRITE_REG(&sc->hw, RDTR,
		       sc->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
	IXGB_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
	IXGB_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	IXGB_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
		       sizeof(struct ixgb_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(&sc->hw, RDH, 0);

	IXGB_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);

	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(&sc->hw, RXDCTL, reg_rxdctl);

	sc->raidc = 1;
	if (sc->raidc) {
		uint32_t        raidc;
		uint8_t         poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

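		/*
		 * Derive the poll threshold from the ring size: roughly
		 * 1/16th of the descriptors, clamped to the 6-bit field.
		 */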
		poll_threshold = ((sc->num_rx_desc - 1) >> 3);
		poll_threshold >>= 1;
		poll_threshold &= 0x3F;
		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
			(sc->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
			poll_threshold;
		IXGB_WRITE_REG(&sc->hw, RAIDC, raidc);
	}

	/* Enable Receive Checksum Offload for TCP and UDP */
	reg_rxcsum = IXGB_READ_REG(&sc->hw, RXCSUM);
	reg_rxcsum |= IXGB_RXCSUM_TUOFL;
	IXGB_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);

	/* Setup the Receive Control Register */
	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
		IXGB_RCTL_CFF |
		(sc->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	switch (sc->rx_buffer_len) {
	default:
	case IXGB_RXBUFFER_2048:
		reg_rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		reg_rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		reg_rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		reg_rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}

	reg_rctl |= IXGB_RCTL_RXEN;

	/* Enable Receives */
	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
}

/*********************************************************************
 *
 *  Free receive related data structures.
 *
 **********************************************************************/
void
ixgb_free_receive_structures(struct ixgb_softc *sc)
{
	struct ixgb_buffer *rx_buffer;
	int             i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (sc->rx_buffer_area != NULL) {
		rx_buffer = sc->rx_buffer_area;
		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->map != NULL &&
			    rx_buffer->map->dm_nsegs > 0) {
				bus_dmamap_sync(sc->rxtag, rx_buffer->map,
				    0, rx_buffer->map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rxtag,
				    rx_buffer->map);
			}
			if (rx_buffer->m_head != NULL) {
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			}
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(sc->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}
	if (sc->rx_buffer_area != NULL) {
		free(sc->rx_buffer_area, M_DEVBUF, 0);
		sc->rx_buffer_area = NULL;
	}
	if (sc->rxtag != NULL)
		sc->rxtag = NULL;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and passes data that has been
 *  DMA'd into host memory up to the stack.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *********************************************************************/
void
ixgb_rxeof(struct ixgb_softc *sc, int count)
{
	struct ifnet   *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf    *mp;
	int             eop = 0;
	int             len;
	u_int8_t        accept_frame = 0;
	int             i;
	int             next_to_use = 0;
	int             eop_desc;

	/* Pointer to the receive descriptor being examined. */
	struct ixgb_rx_desc *current_desc;

	ifp = &sc->interface_data.ac_if;
	i = sc->next_rx_desc_to_check;
	next_to_use = sc->next_rx_desc_to_use;
	eop_desc = sc->next_rx_desc_to_check;
	current_desc = &sc->rx_desc_base[i];
	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
	    sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD))
		return;

	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) &&
		    (count != 0) &&
		    (ifp->if_flags & IFF_RUNNING)) {

		mp = sc->rx_buffer_area[i].m_head;
		bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
		    0, sc->rx_buffer_area[i].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);

		accept_frame = 1;
		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
			count--;
			eop = 1;
		} else {
			eop = 0;
		}
		len = letoh16(current_desc->length);

		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
					    IXGB_RX_DESC_ERRORS_RXE))
			accept_frame = 0;
		if (accept_frame) {

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (sc->fmp == NULL) {
				mp->m_pkthdr.len = len;
				sc->fmp = mp;	/* Store the first mbuf */
				sc->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				sc->lmp->m_next = mp;
				sc->lmp = sc->lmp->m_next;
				sc->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				eop_desc = i;
				ixgb_receive_checksum(sc, current_desc, sc->fmp);

#if NVLAN > 0
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
					sc->fmp->m_pkthdr.ether_vtag =
					    letoh16(current_desc->special);
					sc->fmp->m_flags |= M_VLANTAG;
				}
#endif

				ml_enqueue(&ml, sc->fmp);
				sc->fmp = NULL;
				sc->lmp = NULL;
			}
			sc->rx_buffer_area[i].m_head = NULL;
		} else {
			sc->dropped_pkts++;
			if (sc->fmp != NULL)
				m_freem(sc->fmp);
			sc->fmp = NULL;
			sc->lmp = NULL;
		}

		/*
		 * Zero the descriptor's status so a stale DD bit is not
		 * mistaken for a newly completed descriptor later.
		 */
		current_desc->status = 0;
		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
		    sc->rxdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor */
		if (++i == sc->num_rx_desc) {
			i = 0;
			current_desc = sc->rx_desc_base;
		} else
			current_desc++;
	}
	sc->next_rx_desc_to_check = i;

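	/* Step i back so it indexes the last descriptor written back. */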
	if (--i < 0)
		i = (sc->num_rx_desc - 1);

	/*
	 * 82597EX: Workaround for a redundant write back in the receive
	 * descriptor ring (causes memory corruption).  Avoid re-submitting
	 * the most recently received RX descriptors to the hardware:
	 *
	 * if (last written back descriptor == EOP bit set descriptor)
	 *	avoid re-submitting the most recently received RX
	 *	descriptor back to hardware.
	 * if (last written back descriptor != EOP bit set descriptor)
	 *	avoid re-submitting the most recently received RX
	 *	descriptors up to the last EOP bit set descriptor.
	 */
	if (eop_desc != i) {
		if (++eop_desc == sc->num_rx_desc)
			eop_desc = 0;
		i = eop_desc;
	}
	/*
	 * Replenish the descriptors with new mbufs, up to the last
	 * EOP bit set descriptor.
	 */
	while (next_to_use != i) {
		current_desc = &sc->rx_desc_base[next_to_use];
		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
		    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
		    IXGB_RX_DESC_ERRORS_RXE))) {
			mp = sc->rx_buffer_area[next_to_use].m_head;
			ixgb_get_buf(sc, next_to_use, mp);
		} else {
			if (ixgb_get_buf(sc, next_to_use, NULL) == ENOBUFS)
				break;
		}
		/* Advance our pointers to the next descriptor */
		if (++next_to_use == sc->num_rx_desc)
			next_to_use = 0;
	}
	sc->next_rx_desc_to_use = next_to_use;
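	/*
	 * RDT holds the last descriptor the hardware may use, so park
	 * the tail on the final replenished slot, one before next_to_use.
	 */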
	if (--next_to_use < 0)
		next_to_use = (sc->num_rx_desc - 1);
	/* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
	IXGB_WRITE_REG(&sc->hw, RDT, next_to_use);

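	/* Hand the completed frames to the stack in a single batch. */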
	if_input(ifp, &ml);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack of the checksum status so that it does not
 *  spend time verifying the checksum again.
 *
 *********************************************************************/
void
ixgb_receive_checksum(struct ixgb_softc *sc,
		      struct ixgb_rx_desc *rx_desc,
		      struct mbuf *mp)
{
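	/* IXSM set means the hardware did not evaluate the checksums. */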
	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}
	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
			mp->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
		}
	}
}

/*
 * This turns on the hardware offload of VLAN
 * tag insertion and stripping.
 */
void
ixgb_enable_hw_vlans(struct ixgb_softc *sc)
{
	uint32_t ctrl;

	ctrl = IXGB_READ_REG(&sc->hw, CTRL0);
	ctrl |= IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&sc->hw, CTRL0, ctrl);
}

void
ixgb_enable_intr(struct ixgb_softc *sc)
{
	uint32_t val;

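	/*
	 * Unmask the receiver timer, TX descriptor written back, RX
	 * descriptor minimum threshold, link status change and RX
	 * overrun interrupt sources.
	 */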
	val = IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 |
	      IXGB_INT_LSC | IXGB_INT_RXO;
	if (sc->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
		val |= IXGB_INT_GPI0;
	IXGB_WRITE_REG(&sc->hw, IMS, val);
}

void
ixgb_disable_intr(struct ixgb_softc *sc)
{
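	/* Writing all ones to IMC masks every interrupt source. */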
	IXGB_WRITE_REG(&sc->hw, IMC, ~0);
}

void
ixgb_write_pci_cfg(struct ixgb_hw *hw,
		   uint32_t reg,
		   uint16_t *value)
{
	struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
	pci_chipset_tag_t pc = pa->pa_pc;

	/*
	 * XXX: pci_conf_write() stores a full 32-bit word, yet only a
	 * 16-bit value is supplied here; a read/modify/write of the
	 * containing word may be needed.
	 */
	pci_conf_write(pc, pa->pa_tag, reg, *value);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
void
ixgb_update_stats_counters(struct ixgb_softc *sc)
{
	struct ifnet   *ifp;

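	/*
	 * Most counters are 64 bits wide and are read as separate
	 * low/high register pairs (e.g. GORCL/GORCH).
	 */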
	sc->stats.crcerrs += IXGB_READ_REG(&sc->hw, CRCERRS);
	sc->stats.gprcl += IXGB_READ_REG(&sc->hw, GPRCL);
	sc->stats.gprch += IXGB_READ_REG(&sc->hw, GPRCH);
	sc->stats.gorcl += IXGB_READ_REG(&sc->hw, GORCL);
	sc->stats.gorch += IXGB_READ_REG(&sc->hw, GORCH);
	sc->stats.bprcl += IXGB_READ_REG(&sc->hw, BPRCL);
	sc->stats.bprch += IXGB_READ_REG(&sc->hw, BPRCH);
	sc->stats.mprcl += IXGB_READ_REG(&sc->hw, MPRCL);
	sc->stats.mprch += IXGB_READ_REG(&sc->hw, MPRCH);
	sc->stats.roc += IXGB_READ_REG(&sc->hw, ROC);

	sc->stats.mpc += IXGB_READ_REG(&sc->hw, MPC);
	sc->stats.dc += IXGB_READ_REG(&sc->hw, DC);
	sc->stats.rlec += IXGB_READ_REG(&sc->hw, RLEC);
	sc->stats.xonrxc += IXGB_READ_REG(&sc->hw, XONRXC);
	sc->stats.xontxc += IXGB_READ_REG(&sc->hw, XONTXC);
	sc->stats.xoffrxc += IXGB_READ_REG(&sc->hw, XOFFRXC);
	sc->stats.xofftxc += IXGB_READ_REG(&sc->hw, XOFFTXC);
	sc->stats.gptcl += IXGB_READ_REG(&sc->hw, GPTCL);
	sc->stats.gptch += IXGB_READ_REG(&sc->hw, GPTCH);
	sc->stats.gotcl += IXGB_READ_REG(&sc->hw, GOTCL);
	sc->stats.gotch += IXGB_READ_REG(&sc->hw, GOTCH);
	sc->stats.ruc += IXGB_READ_REG(&sc->hw, RUC);
	sc->stats.rfc += IXGB_READ_REG(&sc->hw, RFC);
	sc->stats.rjc += IXGB_READ_REG(&sc->hw, RJC);
	sc->stats.torl += IXGB_READ_REG(&sc->hw, TORL);
	sc->stats.torh += IXGB_READ_REG(&sc->hw, TORH);
	sc->stats.totl += IXGB_READ_REG(&sc->hw, TOTL);
	sc->stats.toth += IXGB_READ_REG(&sc->hw, TOTH);
	sc->stats.tprl += IXGB_READ_REG(&sc->hw, TPRL);
	sc->stats.tprh += IXGB_READ_REG(&sc->hw, TPRH);
	sc->stats.tptl += IXGB_READ_REG(&sc->hw, TPTL);
	sc->stats.tpth += IXGB_READ_REG(&sc->hw, TPTH);
	sc->stats.plt64c += IXGB_READ_REG(&sc->hw, PLT64C);
	sc->stats.mptcl += IXGB_READ_REG(&sc->hw, MPTCL);
	sc->stats.mptch += IXGB_READ_REG(&sc->hw, MPTCH);
	sc->stats.bptcl += IXGB_READ_REG(&sc->hw, BPTCL);
	sc->stats.bptch += IXGB_READ_REG(&sc->hw, BPTCH);

	sc->stats.uprcl += IXGB_READ_REG(&sc->hw, UPRCL);
	sc->stats.uprch += IXGB_READ_REG(&sc->hw, UPRCH);
	sc->stats.vprcl += IXGB_READ_REG(&sc->hw, VPRCL);
	sc->stats.vprch += IXGB_READ_REG(&sc->hw, VPRCH);
	sc->stats.jprcl += IXGB_READ_REG(&sc->hw, JPRCL);
	sc->stats.jprch += IXGB_READ_REG(&sc->hw, JPRCH);
	sc->stats.rnbc += IXGB_READ_REG(&sc->hw, RNBC);
	sc->stats.icbc += IXGB_READ_REG(&sc->hw, ICBC);
	sc->stats.ecbc += IXGB_READ_REG(&sc->hw, ECBC);
	sc->stats.uptcl += IXGB_READ_REG(&sc->hw, UPTCL);
	sc->stats.uptch += IXGB_READ_REG(&sc->hw, UPTCH);
	sc->stats.vptcl += IXGB_READ_REG(&sc->hw, VPTCL);
	sc->stats.vptch += IXGB_READ_REG(&sc->hw, VPTCH);
	sc->stats.jptcl += IXGB_READ_REG(&sc->hw, JPTCL);
	sc->stats.jptch += IXGB_READ_REG(&sc->hw, JPTCH);
	sc->stats.tsctc += IXGB_READ_REG(&sc->hw, TSCTC);
	sc->stats.tsctfc += IXGB_READ_REG(&sc->hw, TSCTFC);
	sc->stats.ibic += IXGB_READ_REG(&sc->hw, IBIC);
	sc->stats.lfc += IXGB_READ_REG(&sc->hw, LFC);
	sc->stats.pfrc += IXGB_READ_REG(&sc->hw, PFRC);
	sc->stats.pftc += IXGB_READ_REG(&sc->hw, PFTC);
	sc->stats.mcfrc += IXGB_READ_REG(&sc->hw, MCFRC);

	ifp = &sc->interface_data.ac_if;

	/* Fill out the OS statistics structure */
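	/* 10GbE operates full duplex only, so collisions never occur. */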
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_ierrors =
		sc->dropped_pkts +
		sc->stats.crcerrs +
		sc->stats.rnbc +
		sc->stats.mpc +
		sc->stats.rlec;

	/* Tx Errors */
	ifp->if_oerrors =
		sc->watchdog_events;
}

#ifdef IXGB_DEBUG
/**********************************************************************
 *
 *  This routine is called only when ixgb_display_debug_stats is enabled.
 *  It provides a way to examine important statistics maintained by
 *  the driver and the hardware.
 *
 **********************************************************************/
void
ixgb_print_hw_stats(struct ixgb_softc *sc)
{
	char            buf_speed[100], buf_type[100];
	ixgb_bus_speed  bus_speed;
	ixgb_bus_type   bus_type;
	const char * const unit = sc->sc_dv.dv_xname;

	bus_speed = sc->hw.bus.speed;
	bus_type = sc->hw.bus.type;
	snprintf(buf_speed, sizeof(buf_speed),
		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
		"UNKNOWN");
	printf("%s: PCI_Bus_Speed = %s\n", unit, buf_speed);

	snprintf(buf_type, sizeof(buf_type),
		bus_type == ixgb_bus_type_pci ? "PCI" :
		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
		"UNKNOWN");
	printf("%s: PCI_Bus_Type = %s\n", unit, buf_type);

	printf("%s: Tx Descriptors not Avail1 = %ld\n", unit,
		sc->no_tx_desc_avail1);
	printf("%s: Tx Descriptors not Avail2 = %ld\n", unit,
		sc->no_tx_desc_avail2);
	printf("%s: Std Mbuf Failed = %ld\n", unit,
		sc->mbuf_alloc_failed);
	printf("%s: Std Cluster Failed = %ld\n", unit,
		sc->mbuf_cluster_failed);

	printf("%s: Defer count = %lld\n", unit,
		(long long)sc->stats.dc);
	printf("%s: Missed Packets = %lld\n", unit,
		(long long)sc->stats.mpc);
	printf("%s: Receive No Buffers = %lld\n", unit,
		(long long)sc->stats.rnbc);
	printf("%s: Receive length errors = %lld\n", unit,
		(long long)sc->stats.rlec);
	printf("%s: Crc errors = %lld\n", unit,
		(long long)sc->stats.crcerrs);
	printf("%s: Driver dropped packets = %ld\n", unit,
		sc->dropped_pkts);

	printf("%s: XON Rcvd = %lld\n", unit,
		(long long)sc->stats.xonrxc);
	printf("%s: XON Xmtd = %lld\n", unit,
		(long long)sc->stats.xontxc);
	printf("%s: XOFF Rcvd = %lld\n", unit,
		(long long)sc->stats.xoffrxc);
	printf("%s: XOFF Xmtd = %lld\n", unit,
		(long long)sc->stats.xofftxc);

	printf("%s: Good Packets Rcvd = %lld\n", unit,
		(long long)sc->stats.gprcl);
	printf("%s: Good Packets Xmtd = %lld\n", unit,
		(long long)sc->stats.gptcl);

	printf("%s: Jumbo frames recvd = %lld\n", unit,
		(long long)sc->stats.jprcl);
	printf("%s: Jumbo frames Xmtd = %lld\n", unit,
		(long long)sc->stats.jptcl);
}
#endif