/**************************************************************************

Copyright (c) 2001-2005, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* $OpenBSD: if_ixgb.c,v 1.62 2014/07/13 23:10:23 deraadt Exp $ */

#include <dev/pci/if_ixgb.h>

#ifdef IXGB_DEBUG
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgb_display_debug_stats = 0;
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/

#define IXGB_DRIVER_VERSION	"6.1.0"

/*********************************************************************
 *  PCI Device ID Table
 *********************************************************************/

const struct pci_matchid ixgb_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_LR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_CX4 },
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
int  ixgb_probe(struct device *, void *, void *);
void ixgb_attach(struct device *, struct device *, void *);
int  ixgb_intr(void *);
void ixgb_start(struct ifnet *);
int  ixgb_ioctl(struct ifnet *, u_long, caddr_t);
void ixgb_watchdog(struct ifnet *);
void ixgb_init(void *);
void ixgb_stop(void *);
void ixgb_media_status(struct ifnet *, struct ifmediareq *);
int  ixgb_media_change(struct ifnet *);
void ixgb_identify_hardware(struct ixgb_softc *);
int  ixgb_allocate_pci_resources(struct ixgb_softc *);
void ixgb_free_pci_resources(struct ixgb_softc *);
void ixgb_local_timer(void *);
int  ixgb_hardware_init(struct ixgb_softc *);
void ixgb_setup_interface(struct ixgb_softc *);
int  ixgb_setup_transmit_structures(struct ixgb_softc *);
void ixgb_initialize_transmit_unit(struct ixgb_softc *);
int  ixgb_setup_receive_structures(struct ixgb_softc *);
void ixgb_initialize_receive_unit(struct ixgb_softc *);
void ixgb_enable_intr(struct ixgb_softc *);
void ixgb_disable_intr(struct ixgb_softc *);
void ixgb_free_transmit_structures(struct ixgb_softc *);
void ixgb_free_receive_structures(struct ixgb_softc *);
void ixgb_update_stats_counters(struct ixgb_softc *);
void ixgb_txeof(struct ixgb_softc *);
int  ixgb_allocate_receive_structures(struct ixgb_softc *);
int  ixgb_allocate_transmit_structures(struct ixgb_softc *);
void ixgb_rxeof(struct ixgb_softc *, int);
void ixgb_receive_checksum(struct ixgb_softc *, struct ixgb_rx_desc *,
	struct mbuf *);
void ixgb_transmit_checksum_setup(struct ixgb_softc *, struct mbuf *,
	u_int8_t *);
void ixgb_set_promisc(struct ixgb_softc *);
void ixgb_set_multi(struct ixgb_softc *);
#ifdef IXGB_DEBUG
void ixgb_print_hw_stats(struct ixgb_softc *);
#endif
void ixgb_update_link_status(struct ixgb_softc *);
int  ixgb_get_buf(struct ixgb_softc *, int i, struct mbuf *);
void ixgb_enable_hw_vlans(struct ixgb_softc *);
int  ixgb_encap(struct ixgb_softc *, struct mbuf *);
int  ixgb_dma_malloc(struct ixgb_softc *, bus_size_t,
	struct ixgb_dma_alloc *, int);
void ixgb_dma_free(struct ixgb_softc *, struct ixgb_dma_alloc *);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

struct cfattach ixgb_ca = {
	sizeof(struct ixgb_softc), ixgb_probe, ixgb_attach
};

struct cfdriver ixgb_cd = {
	NULL, "ixgb", DV_IFNET
};

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 0	/* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 0	/* chip will only prefetch if tail is
					 * pushed this many descriptors from
					 * head */
#define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */


/*********************************************************************
 *  Device identification routine
 *
 *  ixgb_probe determines whether the driver should be loaded for an
 *  adapter, based on the adapter's PCI vendor/device ID.
 *
 *  return 0 on no match, positive on match
 *********************************************************************/

int
ixgb_probe(struct device *parent, void *match, void *aux)
{
	INIT_DEBUGOUT("ixgb_probe: begin");

	return (pci_matchbyid((struct pci_attach_args *)aux, ixgb_devices,
	    nitems(ixgb_devices)));
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *********************************************************************/

void
ixgb_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct ixgb_softc *sc;
	int             tsize, rsize;

	INIT_DEBUGOUT("ixgb_attach: begin");

	sc = (struct ixgb_softc *)self;
	sc->osdep.ixgb_pa = *pa;

	timeout_set(&sc->timer_handle, ixgb_local_timer, sc);

	/* Determine hardware revision */
	ixgb_identify_hardware(sc);

	/* Parameters (to be read from user) */
	sc->num_tx_desc = IXGB_MAX_TXD;
	sc->num_rx_desc = IXGB_MAX_RXD;
	sc->tx_int_delay = TIDV;
	sc->rx_int_delay = RDTR;
	sc->rx_buffer_len = IXGB_RXBUFFER_2048;

	/*
	 * These parameters control the automatic generation (Tx) of,
	 * and response (Rx) to, Ethernet PAUSE frames.
	 */
	sc->hw.fc.high_water = FCRTH;
	sc->hw.fc.low_water = FCRTL;
	sc->hw.fc.pause_time = FCPAUSE;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.type = FLOW_CONTROL;

	/* Set the max frame size to the largest jumbo frame we support */
	sc->hw.max_frame_size = IXGB_MAX_JUMBO_FRAME_SIZE;

	if (ixgb_allocate_pci_resources(sc))
		goto err_pci;

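	/*
	 * Size each descriptor ring: pad to the maximum descriptor
	 * count, then round up to a whole number of pages;
	 * IXGB_ROUNDUP(size, unit) rounds size up to a multiple of unit.
	 */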
	tsize = IXGB_ROUNDUP(sc->num_tx_desc * sizeof(struct ixgb_tx_desc),
	    IXGB_MAX_TXD * sizeof(struct ixgb_tx_desc));
	tsize = IXGB_ROUNDUP(tsize, PAGE_SIZE);

	/* Allocate Transmit Descriptor ring */
	if (ixgb_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate TxDescriptor memory\n",
		       sc->sc_dv.dv_xname);
		goto err_tx_desc;
	}
	sc->tx_desc_base = (struct ixgb_tx_desc *) sc->txdma.dma_vaddr;

	rsize = IXGB_ROUNDUP(sc->num_rx_desc * sizeof(struct ixgb_rx_desc),
	    IXGB_MAX_RXD * sizeof(struct ixgb_rx_desc));
	rsize = IXGB_ROUNDUP(rsize, PAGE_SIZE);

	/* Allocate Receive Descriptor ring */
	if (ixgb_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_rx_desc;
	}
	sc->rx_desc_base = (struct ixgb_rx_desc *) sc->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (ixgb_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		goto err_hw_init;
	}

	/* Setup OS specific network interface */
	ixgb_setup_interface(sc);

	/* Initialize statistics */
	ixgb_clear_hw_cntrs(&sc->hw);
	ixgb_update_stats_counters(sc);
	ixgb_update_link_status(sc);

	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

	INIT_DEBUGOUT("ixgb_attach: end");
	return;

err_hw_init:
	ixgb_dma_free(sc, &sc->rxdma);
err_rx_desc:
	ixgb_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
	ixgb_free_pci_resources(sc);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  ixgb_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/

void
ixgb_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	struct ixgb_softc *sc = ifp->if_softc;
	int		post = 0;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	if (!sc->link_active)
		return;

	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixgb_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
		/* Send a copy of the frame to the BPF listener */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = IXGB_TX_TIMEOUT;

		post = 1;
	}

	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that the queued frames are available to transmit.
	 */
	if (post)
		IXGB_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgb_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixgb_softc *sc = ifp->if_softc;
	struct ifaddr	*ifa = (struct ifaddr *) data;
	struct ifreq	*ifr = (struct ifreq *) data;
	int		s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
			       "Addr)");
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			ixgb_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->interface_data, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				ixgb_set_promisc(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					ixgb_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ixgb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->interface_data, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			ixgb_disable_intr(sc);
			ixgb_set_multi(sc);
			ixgb_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

void
ixgb_watchdog(struct ifnet *ifp)
{
	struct ixgb_softc *sc = ifp->if_softc;

	/*
	 * If we are in this routine because of pause frames, then don't
	 * reset the hardware.
	 */
	if (IXGB_READ_REG(&sc->hw, STATUS) & IXGB_STATUS_TXOFF) {
		ifp->if_timer = IXGB_TX_TIMEOUT;
		return;
	}

	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);

	ixgb_init(sc);

	sc->watchdog_events++;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 **********************************************************************/

void
ixgb_init(void *arg)
{
	struct ixgb_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	uint32_t temp_reg;
	int s;

	INIT_DEBUGOUT("ixgb_init: begin");

	s = splnet();

	ixgb_stop(sc);

	/* Get the latest MAC address; the user may have configured an LAA */
	bcopy(sc->interface_data.ac_enaddr, sc->hw.curr_mac_addr,
	      IXGB_ETH_LENGTH_OF_ADDRESS);

	/* Initialize the hardware */
	if (ixgb_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		splx(s);
		return;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		ixgb_enable_hw_vlans(sc);

	/* Prepare transmit descriptors and buffers */
	if (ixgb_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		       sc->sc_dv.dv_xname);
		ixgb_stop(sc);
		splx(s);
		return;
	}
	ixgb_initialize_transmit_unit(sc);

	/* Setup Multicast table */
	ixgb_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	if (ixgb_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		       sc->sc_dv.dv_xname);
		ixgb_stop(sc);
		splx(s);
		return;
	}
	ixgb_initialize_receive_unit(sc);

	/* Don't lose promiscuous settings */
	ixgb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Enable jumbo frames */
	IXGB_WRITE_REG(&sc->hw, MFRMS,
	    sc->hw.max_frame_size << IXGB_MFRMS_SHIFT);
	temp_reg = IXGB_READ_REG(&sc->hw, CTRL0);
	temp_reg |= IXGB_CTRL0_JFE;
	IXGB_WRITE_REG(&sc->hw, CTRL0, temp_reg);

	timeout_add_sec(&sc->timer_handle, 1);
	ixgb_clear_hw_cntrs(&sc->hw);
	ixgb_enable_intr(sc);

	splx(s);
}

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/

int
ixgb_intr(void *arg)
{
	struct ixgb_softc *sc = arg;
	struct ifnet	*ifp;
	u_int32_t	reg_icr;
	boolean_t	rxdmt0 = FALSE;
	int claimed = 0;

	ifp = &sc->interface_data.ac_if;

	for (;;) {
		reg_icr = IXGB_READ_REG(&sc->hw, ICR);
		if (reg_icr == 0)
			break;

		claimed = 1;

		if (reg_icr & IXGB_INT_RXDMT0)
			rxdmt0 = TRUE;

		if (ifp->if_flags & IFF_RUNNING) {
			ixgb_rxeof(sc, -1);
			ixgb_txeof(sc);
		}

		/* Link status change */
		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
			timeout_del(&sc->timer_handle);
			ixgb_check_for_link(&sc->hw);
			ixgb_update_link_status(sc);
			timeout_add_sec(&sc->timer_handle, 1);
		}

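		/*
		 * If the RX descriptor-minimum threshold interrupt
		 * fired and RAIDC moderation is enabled, mask and
		 * immediately re-enable RXDMT0 so it is re-armed.
		 */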
		if (rxdmt0 && sc->raidc) {
			IXGB_WRITE_REG(&sc->hw, IMC, IXGB_INT_RXDMT0);
			IXGB_WRITE_REG(&sc->hw, IMS, IXGB_INT_RXDMT0);
		}
	}

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		ixgb_start(ifp);

	return (claimed);
}


/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
void
ixgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ixgb_softc *sc = ifp->if_softc;

	INIT_DEBUGOUT("ixgb_media_status: begin");

	ixgb_check_for_link(&sc->hw);
	ixgb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->hw.link_up) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
	    (sc->hw.phy_type == ixgb_phy_type_txn17401))
		ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
	else
		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
int
ixgb_media_change(struct ifnet *ifp)
{
	struct ixgb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	INIT_DEBUGOUT("ixgb_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgb_encap(struct ixgb_softc *sc, struct mbuf *m_head)
{
	u_int8_t        txd_popts;
	int             i, j, error = 0;
	bus_dmamap_t	map;

	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *current_tx_desc = NULL;

	/*
	 * Force a cleanup if the number of available TX descriptors
	 * hits the threshold.
	 */
	if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		ixgb_txeof(sc);
		/* Do we now have at least the minimum? */
		if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
			sc->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	tx_buffer = &sc->tx_buffer_area[sc->next_avail_tx_desc];
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf(sc->txtag, map,
				     m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		sc->no_tx_dma_setup++;
		return (error);
	}
	IXGB_KASSERT(map->dm_nsegs != 0, ("ixgb_encap: empty packet"));

	if (map->dm_nsegs > sc->num_tx_desc_avail)
		goto fail;

#ifdef IXGB_CSUM_OFFLOAD
	ixgb_transmit_checksum_setup(sc, m_head, &txd_popts);
#else
	txd_popts = 0;
#endif

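	/*
	 * Walk the DMA segment list and fill in one TX descriptor per
	 * segment; the ring index wraps around at num_tx_desc.
	 */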
	i = sc->next_avail_tx_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		tx_buffer = &sc->tx_buffer_area[i];
		current_tx_desc = &sc->tx_desc_base[i];

		current_tx_desc->buff_addr = htole64(map->dm_segs[j].ds_addr);
		current_tx_desc->cmd_type_len = htole32((sc->txd_cmd | map->dm_segs[j].ds_len));
		current_tx_desc->popts = txd_popts;
		if (++i == sc->num_tx_desc)
			i = 0;

		tx_buffer->m_head = NULL;
	}

	sc->num_tx_desc_avail -= map->dm_nsegs;
	sc->next_avail_tx_desc = i;

	/* Find out if we are in VLAN mode */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the VLAN id */
		current_tx_desc->vlan = htole16(m_head->m_pkthdr.ether_vtag);

		/* Tell hardware to add tag */
		current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_VLE);
	}

	tx_buffer->m_head = m_head;
	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_EOP);

	return (0);

fail:
	sc->no_tx_desc_avail2++;
	bus_dmamap_unload(sc->txtag, map);
	return (ENOBUFS);
}

void
ixgb_set_promisc(struct ixgb_softc *sc)
{
	u_int32_t       reg_rctl;
	struct ifnet   *ifp = &sc->interface_data.ac_if;

	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGB_RCTL_MPE;
		reg_rctl &= ~IXGB_RCTL_UPE;
	} else {
		reg_rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	}
	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is
 *  updated.
 *
 **********************************************************************/

void
ixgb_set_multi(struct ixgb_softc *sc)
{
	u_int32_t       reg_rctl = 0;
	u_int8_t        mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
	int             mcnt = 0;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	struct arpcom *ac = &sc->interface_data;
	struct ether_multi *enm;
	struct ether_multistep step;

	IOCTL_DEBUGOUT("ixgb_set_multi: begin");

	if (ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
		goto setit;
	}

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;
		bcopy(enm->enm_addrlo, &mta[mcnt*IXGB_ETH_LENGTH_OF_ADDRESS],
		      IXGB_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

setit:
	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
		reg_rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
	} else
		ixgb_mc_addr_list_update(&sc->hw, mta, mcnt, 0);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

void
ixgb_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct ixgb_softc *sc = arg;
	int s;

	ifp = &sc->interface_data.ac_if;

	s = splnet();

	ixgb_check_for_link(&sc->hw);
	ixgb_update_link_status(sc);
	ixgb_update_stats_counters(sc);
#ifdef IXGB_DEBUG
	if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING)
		ixgb_print_hw_stats(sc);
#endif

	timeout_add_sec(&sc->timer_handle, 1);

	splx(s);
}

void
ixgb_update_link_status(struct ixgb_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;

	if (sc->hw.link_up) {
		if (!sc->link_active) {
			ifp->if_baudrate = IF_Gbps(10);
			sc->link_active = 1;
			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			if_link_state_change(ifp);
		}
	} else {
		if (sc->link_active) {
			ifp->if_baudrate = 0;
			sc->link_active = 0;
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

void
ixgb_stop(void *arg)
{
	struct ifnet   *ifp;
	struct ixgb_softc *sc = arg;
	ifp = &sc->interface_data.ac_if;

	INIT_DEBUGOUT("ixgb_stop: begin\n");
	ixgb_disable_intr(sc);
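	/*
	 * Clear the stopped flag first; the shared ixgb_adapter_stop()
	 * code appears to short-circuit when it is already set.
	 */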
	sc->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&sc->hw);
	timeout_del(&sc->timer_handle);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	ixgb_free_transmit_structures(sc);
	ixgb_free_receive_structures(sc);
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
void
ixgb_identify_hardware(struct ixgb_softc *sc)
{
	u_int32_t	reg;
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;

	/* Make sure our PCI config space has the necessary stuff set */
	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
					    PCI_COMMAND_STATUS_REG);

	/* Save off the information about this board */
	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	sc->hw.revision_id = PCI_REVISION(reg);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
	sc->hw.subsystem_id = PCI_PRODUCT(reg);

	/* Set MacType, etc. based on this PCI info */
	switch (sc->hw.device_id) {
	case IXGB_DEVICE_ID_82597EX:
	case IXGB_DEVICE_ID_82597EX_SR:
	case IXGB_DEVICE_ID_82597EX_LR:
	case IXGB_DEVICE_ID_82597EX_CX4:
		sc->hw.mac_type = ixgb_82597;
		break;
	default:
		INIT_DEBUGOUT1("Unknown device id 0x%x", sc->hw.device_id);
		printf("%s: unsupported device id 0x%x\n",
		    sc->sc_dv.dv_xname, sc->hw.device_id);
	}
}

int
ixgb_allocate_pci_resources(struct ixgb_softc *sc)
{
	int val;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, IXGB_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba is not mem space\n");
		return (ENXIO);
	}
	if (pci_mapreg_map(pa, IXGB_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.ixgb_membase, &sc->osdep.ixgb_memsize, 0)) {
		printf(": cannot find mem space\n");
		return (ENXIO);
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return (ENXIO);
	}

	sc->hw.back = &sc->osdep;

	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, ixgb_intr, sc,
					    sc->sc_dv.dv_xname);
	if (sc->sc_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return (ENXIO);
	}
	printf(": %s", intrstr);

	return (0);
}

void
ixgb_free_pci_resources(struct ixgb_softc *sc)
{
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	if (sc->sc_intrhand)
		pci_intr_disestablish(pc, sc->sc_intrhand);
	sc->sc_intrhand = 0;

	if (sc->osdep.ixgb_membase)
		bus_space_unmap(sc->osdep.mem_bus_space_tag,
		    sc->osdep.mem_bus_space_handle, sc->osdep.ixgb_memsize);
	sc->osdep.ixgb_membase = 0;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
int
ixgb_hardware_init(struct ixgb_softc *sc)
{
	/* Issue a global reset */
	sc->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (!ixgb_validate_eeprom_checksum(&sc->hw)) {
		printf("%s: The EEPROM checksum is not valid\n",
		       sc->sc_dv.dv_xname);
		return (EIO);
	}
	if (!ixgb_init_hw(&sc->hw)) {
		printf("%s: Hardware initialization failed\n",
		       sc->sc_dv.dv_xname);
		return (EIO);
	}
	bcopy(sc->hw.curr_mac_addr, sc->interface_data.ac_enaddr,
	      IXGB_ETH_LENGTH_OF_ADDRESS);

	return (0);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
void
ixgb_setup_interface(struct ixgb_softc *sc)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixgb_setup_interface: begin");

	ifp = &sc->interface_data.ac_if;
	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgb_ioctl;
	ifp->if_start = ixgb_start;
	ifp->if_watchdog = ixgb_watchdog;
	ifp->if_hardmtu =
		IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN - ETHER_CRC_LEN;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifdef IXGB_CSUM_OFFLOAD
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixgb_media_change,
		     ixgb_media_status);
	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
	    (sc->hw.phy_type == ixgb_phy_type_txn17401)) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR |
		    IFM_FDX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR |
		    IFM_FDX, 0, NULL);
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
}

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
int
ixgb_dma_malloc(struct ixgb_softc *sc, bus_size_t size,
		struct ixgb_dma_alloc *dma, int mapflags)
{
	int r;

	dma->dma_tag = sc->osdep.ixgb_pa.pa_dmat;
	r = bus_dmamap_create(dma->dma_tag, size, 1,
	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		printf("%s: ixgb_dma_malloc: bus_dmamap_create failed; "
			"error %u\n", sc->sc_dv.dv_xname, r);
		goto fail_0;
	}

	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: ixgb_dma_malloc: bus_dmamem_alloc failed; "
			"size %lu, error %d\n", sc->sc_dv.dv_xname,
			(unsigned long)size, r);
		goto fail_1;
	}

	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: ixgb_dma_malloc: bus_dmamem_map failed; "
			"size %lu, error %d\n", sc->sc_dv.dv_xname,
			(unsigned long)size, r);
		goto fail_2;
	}

	r = bus_dmamap_load(sc->osdep.ixgb_pa.pa_dmat, dma->dma_map,
			    dma->dma_vaddr, size, NULL,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: ixgb_dma_malloc: bus_dmamap_load failed; "
			"error %u\n", sc->sc_dv.dv_xname, r);
		goto fail_3;
	}

	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
fail_2:
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
fail_1:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;

	return (r);
}

void
ixgb_dma_free(struct ixgb_softc *sc, struct ixgb_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;

	if (dma->dma_map != NULL) {
		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
		    dma->dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	}
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire.
 *
 **********************************************************************/
int
ixgb_allocate_transmit_structures(struct ixgb_softc *sc)
{
	if (!(sc->tx_buffer_area = mallocarray(sc->num_tx_desc,
	    sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
		printf("%s: Unable to allocate tx_buffer memory\n",
		       sc->sc_dv.dv_xname);
		return (ENOMEM);
	}

	return (0);
}

/*********************************************************************
 *
 *  Allocate and initialize transmit structures.
 *
 **********************************************************************/
int
ixgb_setup_transmit_structures(struct ixgb_softc *sc)
{
	struct	ixgb_buffer *tx_buffer;
	int error, i;

	if ((error = ixgb_allocate_transmit_structures(sc)) != 0)
		goto fail;

	bzero((void *)sc->tx_desc_base,
	      (sizeof(struct ixgb_tx_desc)) * sc->num_tx_desc);

	sc->txtag = sc->osdep.ixgb_pa.pa_dmat;

	tx_buffer = sc->tx_buffer_area;
	for (i = 0; i < sc->num_tx_desc; i++) {
		error = bus_dmamap_create(sc->txtag, IXGB_MAX_JUMBO_FRAME_SIZE,
			    IXGB_MAX_SCATTER, IXGB_MAX_JUMBO_FRAME_SIZE, 0,
			    BUS_DMA_NOWAIT, &tx_buffer->map);
		if (error != 0) {
			printf("%s: Unable to create TX DMA map\n",
			    sc->sc_dv.dv_xname);
			goto fail;
		}
		tx_buffer++;
	}

	sc->next_avail_tx_desc = 0;
	sc->oldest_used_tx_desc = 0;

	/* Set number of descriptors available */
	sc->num_tx_desc_avail = sc->num_tx_desc;

	/* Set checksum context */
	sc->active_checksum_context = OFFLOAD_NONE;
	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	   sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail:
	ixgb_free_transmit_structures(sc);
	return (error);
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
ixgb_initialize_transmit_unit(struct ixgb_softc *sc)
{
	u_int32_t       reg_tctl;
	u_int64_t       bus_addr;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
	IXGB_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
	IXGB_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	IXGB_WRITE_REG(&sc->hw, TDLEN,
		       sc->num_tx_desc *
		       sizeof(struct ixgb_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	IXGB_WRITE_REG(&sc->hw, TDH, 0);
	IXGB_WRITE_REG(&sc->hw, TDT, 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     IXGB_READ_REG(&sc->hw, TDBAL),
		     IXGB_READ_REG(&sc->hw, TDLEN));

	IXGB_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);

	/* Program the Transmit Control Register */
	reg_tctl = IXGB_READ_REG(&sc->hw, TCTL);
	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	sc->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;

	if (sc->tx_int_delay > 0)
		sc->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
}

/*********************************************************************
 *
 *  Free all transmit related data structures.
 *
 **********************************************************************/
void
ixgb_free_transmit_structures(struct ixgb_softc *sc)
{
	struct ixgb_buffer *tx_buffer;
	int             i;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (sc->tx_buffer_area != NULL) {
		tx_buffer = sc->tx_buffer_area;
		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
			if (tx_buffer->map != NULL &&
			    tx_buffer->map->dm_nsegs > 0) {
				bus_dmamap_sync(sc->txtag, tx_buffer->map,
				    0, tx_buffer->map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->txtag,
				    tx_buffer->map);
			}

			if (tx_buffer->m_head != NULL) {
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(sc->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		}

		free(sc->tx_buffer_area, M_DEVBUF, 0);
		sc->tx_buffer_area = NULL;
	}
	sc->txtag = NULL;
}

/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). We change the
 *  context only if the protocol type changes.
 *
 **********************************************************************/
void
ixgb_transmit_checksum_setup(struct ixgb_softc *sc,
			     struct mbuf *mp,
			     u_int8_t *txd_popts)
{
	struct ixgb_context_desc *TXD;
	struct ixgb_buffer *tx_buffer;
	int             curr_txd;

	if (mp->m_pkthdr.csum_flags) {
		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				sc->active_checksum_context = OFFLOAD_TCP_IP;
		} else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				sc->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			*txd_popts = 0;
			return;
		}
	} else {
		*txd_popts = 0;
		return;
	}

	/*
	 * If we reach this point, the checksum offload context needs to be
	 * reset.
	 */
	curr_txd = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buffer_area[curr_txd];
	TXD = (struct ixgb_context_desc *)&sc->tx_desc_base[curr_txd];

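	/*
	 * tucss/tucso/tucse give the start of the TCP/UDP checksum
	 * region, the offset where the computed checksum is stored,
	 * and the end of the region (0 meaning to the end of the
	 * packet); the offsets below assume an IPv4 header without
	 * options.
	 */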
	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
	TXD->tucse = 0;

	TXD->mss = 0;

	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}
	TXD->cmd_type_len = htole32(IXGB_CONTEXT_DESC_CMD_TCP |
	    IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE);

	tx_buffer->m_head = NULL;

	if (++curr_txd == sc->num_tx_desc)
		curr_txd = 0;

	sc->num_tx_desc_avail--;
	sc->next_avail_tx_desc = curr_txd;
}

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
void
ixgb_txeof(struct ixgb_softc *sc)
{
	int             i, num_avail;
	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *tx_desc;
	struct ifnet	*ifp = &sc->interface_data.ac_if;

	if (sc->num_tx_desc_avail == sc->num_tx_desc)
		return;

	num_avail = sc->num_tx_desc_avail;
	i = sc->oldest_used_tx_desc;

	tx_buffer = &sc->tx_buffer_area[i];
	tx_desc = &sc->tx_desc_base[i];

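	/*
	 * Walk forward from the oldest in-use descriptor, reclaiming
	 * each descriptor the hardware has marked done (DD set) and
	 * freeing the mbuf attached to the final descriptor of each
	 * packet.
	 */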
	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {
		tx_desc->status = 0;
		num_avail++;

		if (tx_buffer->m_head != NULL) {
			ifp->if_opackets++;

			if (tx_buffer->map->dm_nsegs > 0) {
				bus_dmamap_sync(sc->txtag, tx_buffer->map,
				    0, tx_buffer->map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->txtag, tx_buffer->map);
			}

			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (++i == sc->num_tx_desc)
			i = 0;

		tx_buffer = &sc->tx_buffer_area[i];
		tx_desc = &sc->tx_desc_base[i];
	}
	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* All clean, turn off the timer */
	if (num_avail == sc->num_tx_desc)
		ifp->if_timer = 0;
	/* Some cleaned, reset the timer */
	else if (num_avail != sc->num_tx_desc_avail)
		ifp->if_timer = IXGB_TX_TIMEOUT;

	sc->num_tx_desc_avail = num_avail;
}


/*********************************************************************
 *
 *  Get a buffer from the system mbuf pool.
 *
 **********************************************************************/
int
ixgb_get_buf(struct ixgb_softc *sc, int i,
	     struct mbuf *nmp)
{
	struct mbuf *mp = nmp;
	struct ixgb_buffer *rx_buffer;
	int             error;

	if (mp == NULL) {
		MGETHDR(mp, M_DONTWAIT, MT_DATA);
		if (mp == NULL) {
			sc->mbuf_alloc_failed++;
			return (ENOBUFS);
		}
		MCLGET(mp, M_DONTWAIT);
		if ((mp->m_flags & M_EXT) == 0) {
			m_freem(mp);
			sc->mbuf_cluster_failed++;
			return (ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

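	/*
	 * When the whole frame fits in a single cluster, shift the
	 * start of the payload by ETHER_ALIGN (2 bytes) so the IP
	 * header ends up longword aligned after the 14-byte Ethernet
	 * header.
	 */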
	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
		m_adj(mp, ETHER_ALIGN);

	rx_buffer = &sc->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf(sc->rxtag, rx_buffer->map,
	    mp, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(mp);
		return (error);
	}
	rx_buffer->m_head = mp;
	bzero(&sc->rx_desc_base[i], sizeof(sc->rx_desc_base[i]));
	sc->rx_desc_base[i].buff_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
	bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
	    rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
int
ixgb_allocate_receive_structures(struct ixgb_softc *sc)
{
	int             i, error;
	struct ixgb_buffer *rx_buffer;

	if (!(sc->rx_buffer_area = mallocarray(sc->num_rx_desc,
	    sizeof(struct ixgb_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
		printf("%s: Unable to allocate rx_buffer memory\n",
		       sc->sc_dv.dv_xname);
		return (ENOMEM);
	}

	sc->rxtag = sc->osdep.ixgb_pa.pa_dmat;

	rx_buffer = sc->rx_buffer_area;
	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
					  MCLBYTES, 0, BUS_DMA_NOWAIT,
					  &rx_buffer->map);
		if (error != 0) {
			printf("%s: ixgb_allocate_receive_structures: "
			       "bus_dmamap_create failed; error %u\n",
			       sc->sc_dv.dv_xname, error);
			goto fail;
		}
	}

	for (i = 0; i < sc->num_rx_desc; i++) {
		error = ixgb_get_buf(sc, i, NULL);
		if (error != 0)
			goto fail;
	}
	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
	    sc->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail:
	ixgb_free_receive_structures(sc);
	return (error);
}

/*********************************************************************
 *
 *  Allocate and initialize receive structures.
 *
 **********************************************************************/
int
ixgb_setup_receive_structures(struct ixgb_softc *sc)
{
	bzero((void *)sc->rx_desc_base,
	      (sizeof(struct ixgb_rx_desc)) * sc->num_rx_desc);

	if (ixgb_allocate_receive_structures(sc))
		return (ENOMEM);

	/* Setup our descriptor pointers */
	sc->next_rx_desc_to_check = 0;
	sc->next_rx_desc_to_use = 0;
	return (0);
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
void
ixgb_initialize_receive_unit(struct ixgb_softc *sc)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	u_int32_t       reg_rxdctl;
	u_int64_t       bus_addr;

	/*
	 * Make sure receives are disabled while setting up the descriptor
	 * ring
	 */
	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

	/* Set the Receive Delay Timer Register */
	IXGB_WRITE_REG(&sc->hw, RDTR,
		       sc->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
	IXGB_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
	IXGB_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	IXGB_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
		       sizeof(struct ixgb_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(&sc->hw, RDH, 0);

	IXGB_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);

	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(&sc->hw, RXDCTL, reg_rxdctl);

	sc->raidc = 1;
	if (sc->raidc) {
		uint32_t        raidc;
		uint8_t         poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

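		/*
		 * Derive the poll threshold from the ring size:
		 * roughly 1/16th of the descriptor count, masked to
		 * the 6-bit threshold field of the RAIDC register.
		 */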
		poll_threshold = ((sc->num_rx_desc - 1) >> 3);
		poll_threshold >>= 1;
		poll_threshold &= 0x3F;
		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
			(sc->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
			poll_threshold;
		IXGB_WRITE_REG(&sc->hw, RAIDC, raidc);
	}

	/* Enable Receive Checksum Offload for TCP and UDP */
	reg_rxcsum = IXGB_READ_REG(&sc->hw, RXCSUM);
	reg_rxcsum |= IXGB_RXCSUM_TUOFL;
	IXGB_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);

	/* Setup the Receive Control Register */
	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
		IXGB_RCTL_CFF |
		(sc->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	switch (sc->rx_buffer_len) {
	default:
	case IXGB_RXBUFFER_2048:
		reg_rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		reg_rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		reg_rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		reg_rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}

	reg_rctl |= IXGB_RCTL_RXEN;

	/* Enable Receives */
	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
}

/*********************************************************************
 *
 *  Free receive related data structures.
 *
 **********************************************************************/
void
ixgb_free_receive_structures(struct ixgb_softc *sc)
{
	struct ixgb_buffer *rx_buffer;
	int             i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (sc->rx_buffer_area != NULL) {
		rx_buffer = sc->rx_buffer_area;
		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->map != NULL &&
			    rx_buffer->map->dm_nsegs > 0) {
				bus_dmamap_sync(sc->rxtag, rx_buffer->map,
				    0, rx_buffer->map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rxtag,
				    rx_buffer->map);
			}
			if (rx_buffer->m_head != NULL) {
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			}
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(sc->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}

		free(sc->rx_buffer_area, M_DEVBUF, 0);
		sc->rx_buffer_area = NULL;
	}
	sc->rxtag = NULL;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and passes data that has been
 *  DMA'ed into host memory up to the stack.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *********************************************************************/
1706 void
1707 ixgb_rxeof(struct ixgb_softc *sc, int count)
1708 {
1709 	struct ifnet   *ifp;
1710 	struct mbuf    *mp;
1711 	int             eop = 0;
1712 	int             len;
1713 	u_int8_t        accept_frame = 0;
1714 	int             i;
1715 	int             next_to_use = 0;
1716 	int             eop_desc;
1717 
1718 	/* Pointer to the receive descriptor being examined. */
1719 	struct ixgb_rx_desc *current_desc;
1720 
1721 	ifp = &sc->interface_data.ac_if;
1722 	i = sc->next_rx_desc_to_check;
1723 	next_to_use = sc->next_rx_desc_to_use;
1724 	eop_desc = sc->next_rx_desc_to_check;
1725 	current_desc = &sc->rx_desc_base[i];
1726 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1727 	    sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1728 
1729 	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD))
1730 		return;
1731 
1732 	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) &&
1733 		    (count != 0) &&
1734 		    (ifp->if_flags & IFF_RUNNING)) {
1735 
1736 		mp = sc->rx_buffer_area[i].m_head;
1737 		bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
1738 		    0, sc->rx_buffer_area[i].map->dm_mapsize,
1739 		    BUS_DMASYNC_POSTREAD);
1740 		bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);
1741 
1742 		accept_frame = 1;
1743 		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
1744 			count--;
1745 			eop = 1;
1746 		} else {
1747 			eop = 0;
1748 		}
1749 		len = letoh16(current_desc->length);
1750 
1751 		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
1752 			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
1753 					    IXGB_RX_DESC_ERRORS_RXE))
1754 			accept_frame = 0;
1755 		if (accept_frame) {
1756 
1757 			/* Assign correct length to the current fragment */
1758 			mp->m_len = len;
1759 
1760 			if (sc->fmp == NULL) {
1761 				mp->m_pkthdr.len = len;
1762 				sc->fmp = mp;	/* Store the first mbuf */
1763 				sc->lmp = mp;
1764 			} else {
1765 				/* Chain mbuf's together */
1766 				mp->m_flags &= ~M_PKTHDR;
1767 				sc->lmp->m_next = mp;
1768 				sc->lmp = sc->lmp->m_next;
1769 				sc->fmp->m_pkthdr.len += len;
1770 			}
			if (eop) {
				eop_desc = i;
				sc->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;
				ixgb_receive_checksum(sc, current_desc, sc->fmp);

#if NVLAN > 0
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
					sc->fmp->m_pkthdr.ether_vtag =
					    letoh16(current_desc->special);
					sc->fmp->m_flags |= M_VLANTAG;
				}
#endif

#if NBPFILTER > 0
				/*
				 * Handle BPF listeners. Let the BPF
				 * user see the packet.
				 */
				if (ifp->if_bpf)
					bpf_mtap_ether(ifp->if_bpf, sc->fmp,
					    BPF_DIRECTION_IN);
#endif

				ether_input_mbuf(ifp, sc->fmp);
				sc->fmp = NULL;
				sc->lmp = NULL;
			}
			sc->rx_buffer_area[i].m_head = NULL;
		} else {
			sc->dropped_pkts++;
			if (sc->fmp != NULL)
				m_freem(sc->fmp);
			sc->fmp = NULL;
			sc->lmp = NULL;
		}

		/* Zero out the receive descriptor's status. */
		current_desc->status = 0;
		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
		    sc->rxdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == sc->num_rx_desc) {
			i = 0;
			current_desc = sc->rx_desc_base;
		} else
			current_desc++;
	}
	sc->next_rx_desc_to_check = i;

	if (--i < 0)
		i = (sc->num_rx_desc - 1);

	/*
	 * 82597EX: Workaround for a redundant write-back in the receive
	 * descriptor ring, which can cause memory corruption: never
	 * re-submit the most recently received RX descriptor(s) to the
	 * hardware.
	 *
	 * If the last written-back descriptor is the EOP descriptor,
	 * hold back only that descriptor; if it is not, hold back all
	 * descriptors up to and including the last EOP descriptor.
	 */
	if (eop_desc != i) {
		if (++eop_desc == sc->num_rx_desc)
			eop_desc = 0;
		i = eop_desc;
	}
	/* Replenish the descriptors with new mbufs up to the last EOP descriptor. */
	while (next_to_use != i) {
		current_desc = &sc->rx_desc_base[next_to_use];
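		/*
		 * A descriptor flagged with errors never handed its mbuf
		 * to the stack, so that mbuf can simply be recycled;
		 * otherwise allocate a fresh buffer, and on ENOBUFS stop
		 * here and retry on a later pass.
		 */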
		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
		    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
		    IXGB_RX_DESC_ERRORS_RXE))) {
			mp = sc->rx_buffer_area[next_to_use].m_head;
			ixgb_get_buf(sc, next_to_use, mp);
		} else {
			if (ixgb_get_buf(sc, next_to_use, NULL) == ENOBUFS)
				break;
		}
		/* Advance our pointers to the next descriptor. */
		if (++next_to_use == sc->num_rx_desc)
			next_to_use = 0;
	}
	sc->next_rx_desc_to_use = next_to_use;
	if (--next_to_use < 0)
		next_to_use = (sc->num_rx_desc - 1);
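
	/*
	 * next_to_use was backed up by one so the tail register points
	 * at the last descriptor actually handed to the hardware rather
	 * than at the slot the driver will refill next.
	 */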
	/* Advance the IXGB's Receive Queue #0 "Tail Pointer". */
	IXGB_WRITE_REG(&sc->hw, RDT, next_to_use);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack of the checksum status so that it does not
 *  spend time re-verifying it.
 *
 *********************************************************************/
void
ixgb_receive_checksum(struct ixgb_softc *sc,
		      struct ixgb_rx_desc *rx_desc,
		      struct mbuf *mp)
{
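	/*
	 * IXSM set means the hardware did not evaluate the checksum at
	 * all, so leave the flags clear and let the stack verify the
	 * packet in software.
	 */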
	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
			/* IP checksum good */
			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}
	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
			mp->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
		}
	}
}

/*
 * This turns on the hardware offload of VLAN tag insertion and
 * stripping.
 */
void
ixgb_enable_hw_vlans(struct ixgb_softc *sc)
{
	uint32_t ctrl;

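	/* The single VME bit in CTRL0 controls both directions. */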
	ctrl = IXGB_READ_REG(&sc->hw, CTRL0);
	ctrl |= IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&sc->hw, CTRL0, ctrl);
}

void
ixgb_enable_intr(struct ixgb_softc *sc)
{
	uint32_t val;

	val = IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 |
	      IXGB_INT_LSC | IXGB_INT_RXO;
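	/*
	 * Sun-branded boards presumably signal additional events on
	 * general-purpose interrupt 0, so unmask GPI0 for them too.
	 */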
	if (sc->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
		val |= IXGB_INT_GPI0;
	IXGB_WRITE_REG(&sc->hw, IMS, val);
}

void
ixgb_disable_intr(struct ixgb_softc *sc)
{
	IXGB_WRITE_REG(&sc->hw, IMC, ~0);
}

void
ixgb_write_pci_cfg(struct ixgb_hw *hw,
		   uint32_t reg,
		   uint16_t *value)
{
	struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
	pci_chipset_tag_t pc = pa->pa_pc;

	/*
	 * XXX: pci_conf_write() stores a full 32-bit dword, so this
	 * clears the upper 16 bits of the register; a read/modify/write
	 * may be needed to preserve them.
	 */
	pci_conf_write(pc, pa->pa_tag, reg, *value);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
void
ixgb_update_stats_counters(struct ixgb_softc *sc)
{
	struct ifnet   *ifp;

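	/*
	 * Most counters are 64 bits wide and are read as low/high
	 * 32-bit register pairs (...L/...H).  The hardware registers
	 * appear to be clear-on-read, hence the accumulation into the
	 * softc copies.
	 */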
	sc->stats.crcerrs += IXGB_READ_REG(&sc->hw, CRCERRS);
	sc->stats.gprcl += IXGB_READ_REG(&sc->hw, GPRCL);
	sc->stats.gprch += IXGB_READ_REG(&sc->hw, GPRCH);
	sc->stats.gorcl += IXGB_READ_REG(&sc->hw, GORCL);
	sc->stats.gorch += IXGB_READ_REG(&sc->hw, GORCH);
	sc->stats.bprcl += IXGB_READ_REG(&sc->hw, BPRCL);
	sc->stats.bprch += IXGB_READ_REG(&sc->hw, BPRCH);
	sc->stats.mprcl += IXGB_READ_REG(&sc->hw, MPRCL);
	sc->stats.mprch += IXGB_READ_REG(&sc->hw, MPRCH);
	sc->stats.roc += IXGB_READ_REG(&sc->hw, ROC);

	sc->stats.mpc += IXGB_READ_REG(&sc->hw, MPC);
	sc->stats.dc += IXGB_READ_REG(&sc->hw, DC);
	sc->stats.rlec += IXGB_READ_REG(&sc->hw, RLEC);
	sc->stats.xonrxc += IXGB_READ_REG(&sc->hw, XONRXC);
	sc->stats.xontxc += IXGB_READ_REG(&sc->hw, XONTXC);
	sc->stats.xoffrxc += IXGB_READ_REG(&sc->hw, XOFFRXC);
	sc->stats.xofftxc += IXGB_READ_REG(&sc->hw, XOFFTXC);
	sc->stats.gptcl += IXGB_READ_REG(&sc->hw, GPTCL);
	sc->stats.gptch += IXGB_READ_REG(&sc->hw, GPTCH);
	sc->stats.gotcl += IXGB_READ_REG(&sc->hw, GOTCL);
	sc->stats.gotch += IXGB_READ_REG(&sc->hw, GOTCH);
	sc->stats.ruc += IXGB_READ_REG(&sc->hw, RUC);
	sc->stats.rfc += IXGB_READ_REG(&sc->hw, RFC);
	sc->stats.rjc += IXGB_READ_REG(&sc->hw, RJC);
	sc->stats.torl += IXGB_READ_REG(&sc->hw, TORL);
	sc->stats.torh += IXGB_READ_REG(&sc->hw, TORH);
	sc->stats.totl += IXGB_READ_REG(&sc->hw, TOTL);
	sc->stats.toth += IXGB_READ_REG(&sc->hw, TOTH);
	sc->stats.tprl += IXGB_READ_REG(&sc->hw, TPRL);
	sc->stats.tprh += IXGB_READ_REG(&sc->hw, TPRH);
	sc->stats.tptl += IXGB_READ_REG(&sc->hw, TPTL);
	sc->stats.tpth += IXGB_READ_REG(&sc->hw, TPTH);
	sc->stats.plt64c += IXGB_READ_REG(&sc->hw, PLT64C);
	sc->stats.mptcl += IXGB_READ_REG(&sc->hw, MPTCL);
	sc->stats.mptch += IXGB_READ_REG(&sc->hw, MPTCH);
	sc->stats.bptcl += IXGB_READ_REG(&sc->hw, BPTCL);
	sc->stats.bptch += IXGB_READ_REG(&sc->hw, BPTCH);

	sc->stats.uprcl += IXGB_READ_REG(&sc->hw, UPRCL);
	sc->stats.uprch += IXGB_READ_REG(&sc->hw, UPRCH);
	sc->stats.vprcl += IXGB_READ_REG(&sc->hw, VPRCL);
	sc->stats.vprch += IXGB_READ_REG(&sc->hw, VPRCH);
	sc->stats.jprcl += IXGB_READ_REG(&sc->hw, JPRCL);
	sc->stats.jprch += IXGB_READ_REG(&sc->hw, JPRCH);
	sc->stats.rnbc += IXGB_READ_REG(&sc->hw, RNBC);
	sc->stats.icbc += IXGB_READ_REG(&sc->hw, ICBC);
	sc->stats.ecbc += IXGB_READ_REG(&sc->hw, ECBC);
	sc->stats.uptcl += IXGB_READ_REG(&sc->hw, UPTCL);
	sc->stats.uptch += IXGB_READ_REG(&sc->hw, UPTCH);
	sc->stats.vptcl += IXGB_READ_REG(&sc->hw, VPTCL);
	sc->stats.vptch += IXGB_READ_REG(&sc->hw, VPTCH);
	sc->stats.jptcl += IXGB_READ_REG(&sc->hw, JPTCL);
	sc->stats.jptch += IXGB_READ_REG(&sc->hw, JPTCH);
	sc->stats.tsctc += IXGB_READ_REG(&sc->hw, TSCTC);
	sc->stats.tsctfc += IXGB_READ_REG(&sc->hw, TSCTFC);
	sc->stats.ibic += IXGB_READ_REG(&sc->hw, IBIC);
	sc->stats.lfc += IXGB_READ_REG(&sc->hw, LFC);
	sc->stats.pfrc += IXGB_READ_REG(&sc->hw, PFRC);
	sc->stats.pftc += IXGB_READ_REG(&sc->hw, PFTC);
	sc->stats.mcfrc += IXGB_READ_REG(&sc->hw, MCFRC);

	ifp = &sc->interface_data.ac_if;

	/* Fill out the OS statistics structure. */
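	/* 10GbE links are full-duplex only, so collisions cannot occur. */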
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_ierrors =
	    sc->dropped_pkts +
	    sc->stats.crcerrs +
	    sc->stats.rnbc +
	    sc->stats.mpc +
	    sc->stats.rlec;

	/* Tx Errors */
	ifp->if_oerrors =
	    sc->watchdog_events;
}

#ifdef IXGB_DEBUG
/**********************************************************************
 *
 *  This routine is called only when ixgb_display_debug_stats is
 *  enabled.  It provides a way to examine important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
void
ixgb_print_hw_stats(struct ixgb_softc *sc)
{
	char            buf_speed[100], buf_type[100];
	ixgb_bus_speed  bus_speed;
	ixgb_bus_type   bus_type;
	const char * const unit = sc->sc_dv.dv_xname;

	bus_speed = sc->hw.bus.speed;
	bus_type = sc->hw.bus.type;
	snprintf(buf_speed, sizeof(buf_speed),
	    bus_speed == ixgb_bus_speed_33 ? "33MHz" :
	    bus_speed == ixgb_bus_speed_66 ? "66MHz" :
	    bus_speed == ixgb_bus_speed_100 ? "100MHz" :
	    bus_speed == ixgb_bus_speed_133 ? "133MHz" :
	    "UNKNOWN");
	printf("%s: PCI_Bus_Speed = %s\n", unit, buf_speed);

	snprintf(buf_type, sizeof(buf_type),
	    bus_type == ixgb_bus_type_pci ? "PCI" :
	    bus_type == ixgb_bus_type_pcix ? "PCI-X" :
	    "UNKNOWN");
	printf("%s: PCI_Bus_Type = %s\n", unit, buf_type);

	printf("%s: Tx Descriptors not Avail1 = %ld\n", unit,
	    sc->no_tx_desc_avail1);
	printf("%s: Tx Descriptors not Avail2 = %ld\n", unit,
	    sc->no_tx_desc_avail2);
	printf("%s: Std Mbuf Failed = %ld\n", unit,
	    sc->mbuf_alloc_failed);
	printf("%s: Std Cluster Failed = %ld\n", unit,
	    sc->mbuf_cluster_failed);

	printf("%s: Defer count = %lld\n", unit,
	    (long long)sc->stats.dc);
	printf("%s: Missed Packets = %lld\n", unit,
	    (long long)sc->stats.mpc);
	printf("%s: Receive No Buffers = %lld\n", unit,
	    (long long)sc->stats.rnbc);
	printf("%s: Receive length errors = %lld\n", unit,
	    (long long)sc->stats.rlec);
	printf("%s: Crc errors = %lld\n", unit,
	    (long long)sc->stats.crcerrs);
	printf("%s: Driver dropped packets = %ld\n", unit,
	    sc->dropped_pkts);

	printf("%s: XON Rcvd = %lld\n", unit,
	    (long long)sc->stats.xonrxc);
	printf("%s: XON Xmtd = %lld\n", unit,
	    (long long)sc->stats.xontxc);
	printf("%s: XOFF Rcvd = %lld\n", unit,
	    (long long)sc->stats.xoffrxc);
	printf("%s: XOFF Xmtd = %lld\n", unit,
	    (long long)sc->stats.xofftxc);

	printf("%s: Good Packets Rcvd = %lld\n", unit,
	    (long long)sc->stats.gprcl);
	printf("%s: Good Packets Xmtd = %lld\n", unit,
	    (long long)sc->stats.gptcl);

	printf("%s: Jumbo frames recvd = %lld\n", unit,
	    (long long)sc->stats.jprcl);
	printf("%s: Jumbo frames Xmtd = %lld\n", unit,
	    (long long)sc->stats.jptcl);
}
#endif