/**************************************************************************

Copyright (c) 2001-2005, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* $OpenBSD: if_ixgb.c,v 1.52 2008/11/28 02:44:18 brad Exp $ */

#include <dev/pci/if_ixgb.h>

#ifdef IXGB_DEBUG
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgb_display_debug_stats = 0;
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/

#define IXGB_DRIVER_VERSION	"6.1.0"

/*********************************************************************
 *  PCI Device ID Table
 *********************************************************************/

const struct pci_matchid ixgb_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_LR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_CX4 },
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
int  ixgb_probe(struct device *, void *, void *);
void ixgb_attach(struct device *, struct device *, void *);
void ixgb_shutdown(void *);
int  ixgb_intr(void *);
void ixgb_power(int, void *);
void ixgb_start(struct ifnet *);
int  ixgb_ioctl(struct ifnet *, u_long, caddr_t);
void ixgb_watchdog(struct ifnet *);
void ixgb_init(void *);
void ixgb_stop(void *);
void ixgb_media_status(struct ifnet *, struct ifmediareq *);
int  ixgb_media_change(struct ifnet *);
void ixgb_identify_hardware(struct ixgb_softc *);
int  ixgb_allocate_pci_resources(struct ixgb_softc *);
void ixgb_free_pci_resources(struct ixgb_softc *);
void ixgb_local_timer(void *);
int  ixgb_hardware_init(struct ixgb_softc *);
void ixgb_setup_interface(struct ixgb_softc *);
int  ixgb_setup_transmit_structures(struct ixgb_softc *);
void ixgb_initialize_transmit_unit(struct ixgb_softc *);
int  ixgb_setup_receive_structures(struct ixgb_softc *);
void ixgb_initialize_receive_unit(struct ixgb_softc *);
void ixgb_enable_intr(struct ixgb_softc *);
void ixgb_disable_intr(struct ixgb_softc *);
void ixgb_free_transmit_structures(struct ixgb_softc *);
void ixgb_free_receive_structures(struct ixgb_softc *);
void ixgb_update_stats_counters(struct ixgb_softc *);
void ixgb_txeof(struct ixgb_softc *);
int  ixgb_allocate_receive_structures(struct ixgb_softc *);
int  ixgb_allocate_transmit_structures(struct ixgb_softc *);
void ixgb_rxeof(struct ixgb_softc *, int);
void ixgb_receive_checksum(struct ixgb_softc *, struct ixgb_rx_desc *,
			   struct mbuf *);
void ixgb_transmit_checksum_setup(struct ixgb_softc *, struct mbuf *,
				  u_int8_t *);
void ixgb_set_promisc(struct ixgb_softc *);
void ixgb_set_multi(struct ixgb_softc *);
#ifdef IXGB_DEBUG
void ixgb_print_hw_stats(struct ixgb_softc *);
#endif
void ixgb_update_link_status(struct ixgb_softc *);
int  ixgb_get_buf(struct ixgb_softc *, int i, struct mbuf *);
void ixgb_enable_hw_vlans(struct ixgb_softc *);
int  ixgb_encap(struct ixgb_softc *, struct mbuf *);
int  ixgb_dma_malloc(struct ixgb_softc *, bus_size_t,
		     struct ixgb_dma_alloc *, int);
void ixgb_dma_free(struct ixgb_softc *, struct ixgb_dma_alloc *);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

struct cfattach ixgb_ca = {
	sizeof(struct ixgb_softc), ixgb_probe, ixgb_attach
};

struct cfdriver ixgb_cd = {
	0, "ixgb", DV_IFNET
};

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 0	/* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 0	/* chip will only prefetch if tail is
					 * pushed this many descriptors from
					 * head */
#define RXDCTL_WTHRESH_DEFAULT 0	/* chip writes back at this many or RXT0 */


/*********************************************************************
 *  Device identification routine
 *
 *  ixgb_probe determines whether the driver should be loaded on the
 *  adapter, based on the PCI vendor/device ID of the adapter.
 *
 *  return 0 on no match, positive on match
 *********************************************************************/

int
ixgb_probe(struct device *parent, void *match, void *aux)
{
	INIT_DEBUGOUT("ixgb_probe: begin");

	return (pci_matchbyid((struct pci_attach_args *)aux, ixgb_devices,
	    sizeof(ixgb_devices)/sizeof(ixgb_devices[0])));
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *********************************************************************/

void
ixgb_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct ixgb_softc *sc;
	int             tsize, rsize;

	INIT_DEBUGOUT("ixgb_attach: begin");

	sc = (struct ixgb_softc *)self;
	sc->osdep.ixgb_pa = *pa;

	timeout_set(&sc->timer_handle, ixgb_local_timer, sc);

	/* Determine hardware revision */
	ixgb_identify_hardware(sc);

	/* Parameters (to be read from user) */
	sc->num_tx_desc = IXGB_MAX_TXD;
	sc->num_rx_desc = IXGB_MAX_RXD;
	sc->tx_int_delay = TIDV;
	sc->rx_int_delay = RDTR;
	sc->rx_buffer_len = IXGB_RXBUFFER_2048;

	/*
	 * These parameters control the automatic generation (Tx) of and
	 * response (Rx) to Ethernet PAUSE frames.
	 */
	sc->hw.fc.high_water = FCRTH;
	sc->hw.fc.low_water = FCRTL;
	sc->hw.fc.pause_time = FCPAUSE;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.type = FLOW_CONTROL;

	/* Set the max frame size to the maximum jumbo frame size */
	sc->hw.max_frame_size = IXGB_MAX_JUMBO_FRAME_SIZE;

	if (ixgb_allocate_pci_resources(sc)) {
		printf("%s: Allocation of PCI resources failed\n",
		       sc->sc_dv.dv_xname);
		goto err_pci;
	}

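	/*
	 * Round each descriptor ring size up to a multiple of the largest
	 * supported ring, then to a whole page, before allocating the
	 * DMA'able memory for it below.
	 */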
	tsize = IXGB_ROUNDUP(sc->num_tx_desc * sizeof(struct ixgb_tx_desc),
	    IXGB_MAX_TXD * sizeof(struct ixgb_tx_desc));
	tsize = IXGB_ROUNDUP(tsize, PAGE_SIZE);

	/* Allocate Transmit Descriptor ring */
	if (ixgb_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate TxDescriptor memory\n",
		       sc->sc_dv.dv_xname);
		goto err_tx_desc;
	}
	sc->tx_desc_base = (struct ixgb_tx_desc *)sc->txdma.dma_vaddr;

	rsize = IXGB_ROUNDUP(sc->num_rx_desc * sizeof(struct ixgb_rx_desc),
	    IXGB_MAX_RXD * sizeof(struct ixgb_rx_desc));
	rsize = IXGB_ROUNDUP(rsize, PAGE_SIZE);

	/* Allocate Receive Descriptor ring */
	if (ixgb_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_rx_desc;
	}
	sc->rx_desc_base = (struct ixgb_rx_desc *)sc->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (ixgb_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		goto err_hw_init;
	}

	/* Setup OS specific network interface */
	ixgb_setup_interface(sc);

	/* Initialize statistics */
	ixgb_clear_hw_cntrs(&sc->hw);
	ixgb_update_stats_counters(sc);
	ixgb_update_link_status(sc);

	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

	INIT_DEBUGOUT("ixgb_attach: end");
	sc->sc_powerhook = powerhook_establish(ixgb_power, sc);
	sc->sc_shutdownhook = shutdownhook_establish(ixgb_shutdown, sc);
	return;

err_hw_init:
	ixgb_dma_free(sc, &sc->rxdma);
err_rx_desc:
	ixgb_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
	ixgb_free_pci_resources(sc);
}

void
ixgb_power(int why, void *arg)
{
	struct ixgb_softc *sc = (struct ixgb_softc *)arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->interface_data.ac_if;
		if (ifp->if_flags & IFF_UP)
			ixgb_init(sc);
	}
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

void
ixgb_shutdown(void *arg)
{
	struct ixgb_softc *sc = arg;

	ixgb_stop(sc);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  ixgb_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/

void
ixgb_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	struct ixgb_softc *sc = ifp->if_softc;
	int		post = 0;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	if (!sc->link_active)
		return;

	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (ixgb_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
		/* Send a copy of the frame to the BPF listener */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = IXGB_TX_TIMEOUT;

		post = 1;
	}

	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * hardware that these frames are available to transmit.
	 */
	if (post)
		IXGB_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgb_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ixgb_softc *sc = ifp->if_softc;
	struct ifaddr	*ifa = (struct ifaddr *) data;
	struct ifreq	*ifr = (struct ifreq *) data;
	int		s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
			       "Addr)");
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			ixgb_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->interface_data, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				ixgb_set_promisc(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					ixgb_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ixgb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->interface_data, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			ixgb_disable_intr(sc);
			ixgb_set_multi(sc);
			ixgb_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever the hardware quits transmitting.
 *
 **********************************************************************/

void
ixgb_watchdog(struct ifnet * ifp)
{
	struct ixgb_softc *sc = ifp->if_softc;

	/*
	 * If we are in this routine because of pause frames, then don't
	 * reset the hardware.
	 */
	if (IXGB_READ_REG(&sc->hw, STATUS) & IXGB_STATUS_TXOFF) {
		ifp->if_timer = IXGB_TX_TIMEOUT;
		return;
	}

	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);

	ixgb_init(sc);

	sc->watchdog_events++;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as an
 *  init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/

void
ixgb_init(void *arg)
{
	struct ixgb_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	uint32_t temp_reg;
	int s;

	INIT_DEBUGOUT("ixgb_init: begin");

	s = splnet();

	ixgb_stop(sc);

	/* Get the latest MAC address; the user can use a LAA */
	bcopy(sc->interface_data.ac_enaddr, sc->hw.curr_mac_addr,
	      IXGB_ETH_LENGTH_OF_ADDRESS);

	/* Initialize the hardware */
	if (ixgb_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		splx(s);
		return;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		ixgb_enable_hw_vlans(sc);

	/* Prepare transmit descriptors and buffers */
	if (ixgb_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		       sc->sc_dv.dv_xname);
		ixgb_stop(sc);
		splx(s);
		return;
	}
	ixgb_initialize_transmit_unit(sc);

	/* Setup Multicast table */
	ixgb_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	if (ixgb_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		       sc->sc_dv.dv_xname);
		ixgb_stop(sc);
		splx(s);
		return;
	}
	ixgb_initialize_receive_unit(sc);

	/* Don't lose promiscuous settings */
	ixgb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Enable jumbo frames */
	IXGB_WRITE_REG(&sc->hw, MFRMS,
	    sc->hw.max_frame_size << IXGB_MFRMS_SHIFT);
	temp_reg = IXGB_READ_REG(&sc->hw, CTRL0);
	temp_reg |= IXGB_CTRL0_JFE;
	IXGB_WRITE_REG(&sc->hw, CTRL0, temp_reg);

	timeout_add_sec(&sc->timer_handle, 1);
	ixgb_clear_hw_cntrs(&sc->hw);
	ixgb_enable_intr(sc);

	splx(s);
}

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/

int
ixgb_intr(void *arg)
{
	struct ixgb_softc *sc = arg;
	struct ifnet	*ifp;
	u_int32_t	reg_icr;
	boolean_t	rxdmt0 = FALSE;
	int claimed = 0;

	ifp = &sc->interface_data.ac_if;

	for (;;) {
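		/*
		 * ICR is clear-on-read: each read returns the pending
		 * interrupt causes and acknowledges them in one step.
		 */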
		reg_icr = IXGB_READ_REG(&sc->hw, ICR);
		if (reg_icr == 0)
			break;

		claimed = 1;

		if (reg_icr & IXGB_INT_RXDMT0)
			rxdmt0 = TRUE;

		if (ifp->if_flags & IFF_RUNNING) {
			ixgb_rxeof(sc, -1);
			ixgb_txeof(sc);
		}

		/* Link status change */
		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
			timeout_del(&sc->timer_handle);
			ixgb_check_for_link(&sc->hw);
			ixgb_update_link_status(sc);
			timeout_add_sec(&sc->timer_handle, 1);
		}

		if (rxdmt0 && sc->raidc) {
			IXGB_WRITE_REG(&sc->hw, IMC, IXGB_INT_RXDMT0);
			IXGB_WRITE_REG(&sc->hw, IMS, IXGB_INT_RXDMT0);
		}
	}

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		ixgb_start(ifp);

	return (claimed);
}


/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
void
ixgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ixgb_softc *sc = ifp->if_softc;

	INIT_DEBUGOUT("ixgb_media_status: begin");

	ixgb_check_for_link(&sc->hw);
	ixgb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->hw.link_up) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
	    (sc->hw.phy_type == ixgb_phy_type_txn17401))
		ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
	else
		ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
int
ixgb_media_change(struct ifnet * ifp)
{
	struct ixgb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	INIT_DEBUGOUT("ixgb_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgb_encap(struct ixgb_softc *sc, struct mbuf *m_head)
{
	u_int8_t        txd_popts;
	int             i, j, error = 0;
	bus_dmamap_t	map;

	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *current_tx_desc = NULL;

	/*
	 * Force a cleanup if the number of TX descriptors available hits
	 * the threshold.
	 */
	if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
		ixgb_txeof(sc);
		/* Do we now have at least a minimal number available? */
		if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
			sc->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	tx_buffer = &sc->tx_buffer_area[sc->next_avail_tx_desc];
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf(sc->txtag, map,
				     m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		sc->no_tx_dma_setup++;
		return (error);
	}
	IXGB_KASSERT(map->dm_nsegs != 0, ("ixgb_encap: empty packet"));

	if (map->dm_nsegs > sc->num_tx_desc_avail)
		goto fail;

#ifdef IXGB_CSUM_OFFLOAD
	ixgb_transmit_checksum_setup(sc, m_head, &txd_popts);
#else
	txd_popts = 0;
#endif

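	/*
	 * Fill in one transmit descriptor per DMA segment, wrapping the
	 * ring index at the end of the ring.
	 */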
	i = sc->next_avail_tx_desc;
	for (j = 0; j < map->dm_nsegs; j++) {
		tx_buffer = &sc->tx_buffer_area[i];
		current_tx_desc = &sc->tx_desc_base[i];

		current_tx_desc->buff_addr = htole64(map->dm_segs[j].ds_addr);
		current_tx_desc->cmd_type_len =
		    htole32((sc->txd_cmd | map->dm_segs[j].ds_len));
		current_tx_desc->popts = txd_popts;
		if (++i == sc->num_tx_desc)
			i = 0;

		tx_buffer->m_head = NULL;
	}

	sc->num_tx_desc_avail -= map->dm_nsegs;
	sc->next_avail_tx_desc = i;

	/* Find out if we are in VLAN mode */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the VLAN id */
		current_tx_desc->vlan = htole16(m_head->m_pkthdr.ether_vtag);

		/* Tell hardware to add tag */
		current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_VLE);
	}

	tx_buffer->m_head = m_head;
	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_EOP);

	return (0);

fail:
	sc->no_tx_desc_avail2++;
	bus_dmamap_unload(sc->txtag, map);
	return (ENOBUFS);
}

void
ixgb_set_promisc(struct ixgb_softc *sc)
{
	u_int32_t       reg_rctl;
	struct ifnet   *ifp = &sc->interface_data.ac_if;

	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGB_RCTL_MPE;
		reg_rctl &= ~IXGB_RCTL_UPE;
	} else {
		reg_rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	}
	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/

void
ixgb_set_multi(struct ixgb_softc *sc)
{
	u_int32_t       reg_rctl = 0;
	u_int8_t        mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
	int             mcnt = 0;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	struct arpcom *ac = &sc->interface_data;
	struct ether_multi *enm;
	struct ether_multistep step;

	IOCTL_DEBUGOUT("ixgb_set_multi: begin");

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
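		/*
		 * A range of addresses (addrlo != addrhi) cannot be
		 * expressed in the multicast filter table, so fall back
		 * to receiving all multicast frames.
		 */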
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			mcnt = MAX_NUM_MULTICAST_ADDRESSES;
		}
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;
		bcopy(enm->enm_addrlo, &mta[mcnt*IXGB_ETH_LENGTH_OF_ADDRESS],
		      IXGB_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
		reg_rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
	} else
		ixgb_mc_addr_list_update(&sc->hw, mta, mcnt, 0);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

void
ixgb_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct ixgb_softc *sc = arg;
	int s;

	ifp = &sc->interface_data.ac_if;

	s = splnet();

	ixgb_check_for_link(&sc->hw);
	ixgb_update_link_status(sc);
	ixgb_update_stats_counters(sc);
#ifdef IXGB_DEBUG
	if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING)
		ixgb_print_hw_stats(sc);
#endif

	timeout_add_sec(&sc->timer_handle, 1);

	splx(s);
}

void
ixgb_update_link_status(struct ixgb_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;

	if (sc->hw.link_up) {
		if (!sc->link_active) {
			ifp->if_baudrate = IF_Gbps(10);
			sc->link_active = 1;
			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			if_link_state_change(ifp);
		}
	} else {
		if (sc->link_active) {
			ifp->if_baudrate = 0;
			sc->link_active = 0;
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

void
ixgb_stop(void *arg)
{
	struct ifnet   *ifp;
	struct ixgb_softc *sc = arg;

	ifp = &sc->interface_data.ac_if;

	INIT_DEBUGOUT("ixgb_stop: begin\n");
	ixgb_disable_intr(sc);
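	/*
	 * Clear adapter_stopped first, presumably so the shared
	 * ixgb_adapter_stop() code performs the full stop and reset
	 * rather than returning early.
	 */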
	sc->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&sc->hw);
	timeout_del(&sc->timer_handle);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	ixgb_free_transmit_structures(sc);
	ixgb_free_receive_structures(sc);
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
void
ixgb_identify_hardware(struct ixgb_softc *sc)
{
	u_int32_t	reg;
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;

	/* Make sure our PCI config space has the necessary stuff set */
	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
					    PCI_COMMAND_STATUS_REG);

	/* Save off the information about this board */
	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	sc->hw.revision_id = PCI_REVISION(reg);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
	sc->hw.subsystem_id = PCI_PRODUCT(reg);

	/* Set MacType, etc. based on this PCI info */
	switch (sc->hw.device_id) {
	case IXGB_DEVICE_ID_82597EX:
	case IXGB_DEVICE_ID_82597EX_SR:
	case IXGB_DEVICE_ID_82597EX_LR:
	case IXGB_DEVICE_ID_82597EX_CX4:
		sc->hw.mac_type = ixgb_82597;
		break;
	default:
		INIT_DEBUGOUT1("Unknown device id 0x%x", sc->hw.device_id);
		printf("%s: unsupported device id 0x%x\n",
		    sc->sc_dv.dv_xname, sc->hw.device_id);
	}
}

int
ixgb_allocate_pci_resources(struct ixgb_softc *sc)
{
	int val;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, IXGB_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba is not mem space\n");
		return (ENXIO);
	}
	if (pci_mapreg_map(pa, IXGB_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.ixgb_membase, &sc->osdep.ixgb_memsize, 0)) {
		printf(": cannot find mem space\n");
		return (ENXIO);
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return (ENXIO);
	}

	sc->hw.back = &sc->osdep;

	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, ixgb_intr, sc,
					    sc->sc_dv.dv_xname);
	if (sc->sc_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return (ENXIO);
	}
	printf(": %s", intrstr);

	return (0);
}

void
ixgb_free_pci_resources(struct ixgb_softc *sc)
{
	struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	if (sc->sc_intrhand)
		pci_intr_disestablish(pc, sc->sc_intrhand);
	sc->sc_intrhand = 0;

	if (sc->osdep.ixgb_membase)
		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
				sc->osdep.ixgb_memsize);
	sc->osdep.ixgb_membase = 0;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
int
ixgb_hardware_init(struct ixgb_softc *sc)
{
	/* Issue a global reset */
	sc->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (!ixgb_validate_eeprom_checksum(&sc->hw)) {
		printf("%s: The EEPROM Checksum Is Not Valid\n",
		       sc->sc_dv.dv_xname);
		return (EIO);
	}
	if (!ixgb_init_hw(&sc->hw)) {
		printf("%s: Hardware Initialization Failed\n",
		       sc->sc_dv.dv_xname);
		return (EIO);
	}
	bcopy(sc->hw.curr_mac_addr, sc->interface_data.ac_enaddr,
	      IXGB_ETH_LENGTH_OF_ADDRESS);

	return (0);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
void
ixgb_setup_interface(struct ixgb_softc *sc)
{
	struct ifnet   *ifp;

	INIT_DEBUGOUT("ixgb_setup_interface: begin");

	ifp = &sc->interface_data.ac_if;
	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgb_ioctl;
	ifp->if_start = ixgb_start;
	ifp->if_watchdog = ixgb_watchdog;
	ifp->if_hardmtu =
		IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN - ETHER_CRC_LEN;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifdef IXGB_CSUM_OFFLOAD
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, ixgb_media_change,
		     ixgb_media_status);
	if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
	    (sc->hw.phy_type == ixgb_phy_type_txn17401)) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR |
		    IFM_FDX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR |
		    IFM_FDX, 0, NULL);
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
}

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
int
ixgb_dma_malloc(struct ixgb_softc *sc, bus_size_t size,
		struct ixgb_dma_alloc *dma, int mapflags)
{
	int r;

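	/*
	 * Descriptor rings must be physically contiguous, so the map is
	 * created with a single DMA segment covering the whole allocation.
	 */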
	dma->dma_tag = sc->osdep.ixgb_pa.pa_dmat;
	r = bus_dmamap_create(dma->dma_tag, size, 1,
	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		printf("%s: ixgb_dma_malloc: bus_dmamap_create failed; "
			"error %d\n", sc->sc_dv.dv_xname, r);
		goto fail_0;
	}

	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: ixgb_dma_malloc: bus_dmamem_alloc failed; "
			"size %lu, error %d\n", sc->sc_dv.dv_xname,
			(unsigned long)size, r);
		goto fail_1;
	}

	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: ixgb_dma_malloc: bus_dmamem_map failed; "
			"size %lu, error %d\n", sc->sc_dv.dv_xname,
			(unsigned long)size, r);
		goto fail_2;
	}

	r = bus_dmamap_load(sc->osdep.ixgb_pa.pa_dmat, dma->dma_map,
			    dma->dma_vaddr, size, NULL,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: ixgb_dma_malloc: bus_dmamap_load failed; "
			"error %d\n", sc->sc_dv.dv_xname, r);
		goto fail_3;
	}

	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
fail_2:
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
fail_1:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;

	return (r);
}

void
ixgb_dma_free(struct ixgb_softc *sc, struct ixgb_dma_alloc *dma)
{
	if (dma->dma_tag == NULL)
		return;

	if (dma->dma_map != NULL) {
		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
		    dma->dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	}
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire.
 *
 **********************************************************************/
int
ixgb_allocate_transmit_structures(struct ixgb_softc *sc)
{
	if (!(sc->tx_buffer_area = malloc(sizeof(struct ixgb_buffer) *
	    sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		printf("%s: Unable to allocate tx_buffer memory\n",
		       sc->sc_dv.dv_xname);
		return (ENOMEM);
	}

	return (0);
}

/*********************************************************************
 *
 *  Allocate and initialize transmit structures.
 *
 **********************************************************************/
int
ixgb_setup_transmit_structures(struct ixgb_softc *sc)
{
	struct	ixgb_buffer *tx_buffer;
	int error, i;

	if ((error = ixgb_allocate_transmit_structures(sc)) != 0)
		goto fail;

	bzero((void *)sc->tx_desc_base,
	      (sizeof(struct ixgb_tx_desc)) * sc->num_tx_desc);

	sc->txtag = sc->osdep.ixgb_pa.pa_dmat;

	tx_buffer = sc->tx_buffer_area;
	for (i = 0; i < sc->num_tx_desc; i++) {
		error = bus_dmamap_create(sc->txtag, IXGB_MAX_JUMBO_FRAME_SIZE,
			    IXGB_MAX_SCATTER, IXGB_MAX_JUMBO_FRAME_SIZE, 0,
			    BUS_DMA_NOWAIT, &tx_buffer->map);
		if (error != 0) {
			printf("%s: Unable to create TX DMA map\n",
			    sc->sc_dv.dv_xname);
			goto fail;
		}
		tx_buffer++;
	}

	sc->next_avail_tx_desc = 0;
	sc->oldest_used_tx_desc = 0;

	/* Set number of descriptors available */
	sc->num_tx_desc_avail = sc->num_tx_desc;

	/* Set checksum context */
	sc->active_checksum_context = OFFLOAD_NONE;
	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	   sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail:
	ixgb_free_transmit_structures(sc);
	return (error);
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
ixgb_initialize_transmit_unit(struct ixgb_softc *sc)
{
	u_int32_t       reg_tctl;
	u_int64_t       bus_addr;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
	IXGB_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
	IXGB_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	IXGB_WRITE_REG(&sc->hw, TDLEN,
		       sc->num_tx_desc *
		       sizeof(struct ixgb_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	IXGB_WRITE_REG(&sc->hw, TDH, 0);
	IXGB_WRITE_REG(&sc->hw, TDT, 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     IXGB_READ_REG(&sc->hw, TDBAL),
		     IXGB_READ_REG(&sc->hw, TDLEN));

	IXGB_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);

	/* Program the Transmit Control Register */
	reg_tctl = IXGB_READ_REG(&sc->hw, TCTL);
	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	sc->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;

	if (sc->tx_int_delay > 0)
		sc->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
}

/*********************************************************************
 *
 *  Free all transmit related data structures.
 *
 **********************************************************************/
void
ixgb_free_transmit_structures(struct ixgb_softc *sc)
{
	struct ixgb_buffer *tx_buffer;
	int             i;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (sc->tx_buffer_area != NULL) {
		tx_buffer = sc->tx_buffer_area;
		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
			if (tx_buffer->map != NULL &&
			    tx_buffer->map->dm_nsegs > 0) {
				bus_dmamap_sync(sc->txtag, tx_buffer->map,
				    0, tx_buffer->map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->txtag,
				    tx_buffer->map);
			}

			if (tx_buffer->m_head != NULL) {
				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(sc->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		}
	}
	if (sc->tx_buffer_area != NULL) {
		free(sc->tx_buffer_area, M_DEVBUF);
		sc->tx_buffer_area = NULL;
	}
	if (sc->txtag != NULL) {
		sc->txtag = NULL;
	}
}

/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). We change the
 *  context only if the protocol type changes.
 *
 **********************************************************************/
void
ixgb_transmit_checksum_setup(struct ixgb_softc *sc,
			     struct mbuf *mp,
			     u_int8_t *txd_popts)
{
	struct ixgb_context_desc *TXD;
	struct ixgb_buffer *tx_buffer;
	int             curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				sc->active_checksum_context = OFFLOAD_TCP_IP;

		} else if (mp->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				sc->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			*txd_popts = 0;
			return;
		}
	} else {
		*txd_popts = 0;
		return;
	}

	/*
	 * If we reach this point, the checksum offload context needs to be
	 * reset.
	 */
	curr_txd = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buffer_area[curr_txd];
	TXD = (struct ixgb_context_desc *)&sc->tx_desc_base[curr_txd];

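	/*
	 * tucss/tucso give the offsets where checksumming starts and
	 * where the computed checksum is inserted; tucse = 0 means
	 * checksum to the end of the packet.
	 */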
	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
	TXD->tucse = 0;

	TXD->mss = 0;

	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct tcphdr, th_sum);
	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->tucso =
			ENET_HEADER_SIZE + sizeof(struct ip) +
			offsetof(struct udphdr, uh_sum);
	}
	TXD->cmd_type_len = htole32(IXGB_CONTEXT_DESC_CMD_TCP |
	    IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE);

	tx_buffer->m_head = NULL;

	if (++curr_txd == sc->num_tx_desc)
		curr_txd = 0;

	sc->num_tx_desc_avail--;
	sc->next_avail_tx_desc = curr_txd;
}

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
void
ixgb_txeof(struct ixgb_softc *sc)
{
	int             i, num_avail;
	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *tx_desc;
	struct ifnet	*ifp = &sc->interface_data.ac_if;

	if (sc->num_tx_desc_avail == sc->num_tx_desc)
		return;

	num_avail = sc->num_tx_desc_avail;
	i = sc->oldest_used_tx_desc;

	tx_buffer = &sc->tx_buffer_area[i];
	tx_desc = &sc->tx_desc_base[i];

	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {

		tx_desc->status = 0;
		num_avail++;

		if (tx_buffer->m_head != NULL) {
			ifp->if_opackets++;

			if (tx_buffer->map->dm_nsegs > 0) {
				bus_dmamap_sync(sc->txtag, tx_buffer->map,
				    0, tx_buffer->map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->txtag, tx_buffer->map);
			}

			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (++i == sc->num_tx_desc)
			i = 0;

		tx_buffer = &sc->tx_buffer_area[i];
		tx_desc = &sc->tx_desc_base[i];
	}
	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
	    sc->txdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* All clean, turn off the timer */
	if (num_avail == sc->num_tx_desc)
		ifp->if_timer = 0;
	/* Some cleaned, reset the timer */
	else if (num_avail != sc->num_tx_desc_avail)
		ifp->if_timer = IXGB_TX_TIMEOUT;

	sc->num_tx_desc_avail = num_avail;
}


/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
 *
 **********************************************************************/
int
ixgb_get_buf(struct ixgb_softc *sc, int i,
	     struct mbuf *nmp)
{
	struct mbuf *mp = nmp;
	struct ixgb_buffer *rx_buffer;
	int             error;

	if (mp == NULL) {
		MGETHDR(mp, M_DONTWAIT, MT_DATA);
		if (mp == NULL) {
			sc->mbuf_alloc_failed++;
			return (ENOBUFS);
		}
		MCLGET(mp, M_DONTWAIT);
		if ((mp->m_flags & M_EXT) == 0) {
			m_freem(mp);
			sc->mbuf_cluster_failed++;
			return (ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

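	/*
	 * If the largest frame still fits in the cluster with room to
	 * spare, shift the data by ETHER_ALIGN (2 bytes) so the IP
	 * header behind the 14-byte Ethernet header is 32-bit aligned.
	 */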
	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
		m_adj(mp, ETHER_ALIGN);

	rx_buffer = &sc->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load_mbuf(sc->rxtag, rx_buffer->map,
	    mp, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(mp);
		return (error);
	}
	rx_buffer->m_head = mp;
	sc->rx_desc_base[i].buff_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
	bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
	    rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
int
ixgb_allocate_receive_structures(struct ixgb_softc *sc)
{
	int             i, error;
	struct ixgb_buffer *rx_buffer;

	if (!(sc->rx_buffer_area = malloc(sizeof(struct ixgb_buffer) *
	    sc->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
		printf("%s: Unable to allocate rx_buffer memory\n",
		       sc->sc_dv.dv_xname);
		return (ENOMEM);
	}

	sc->rxtag = sc->osdep.ixgb_pa.pa_dmat;

	rx_buffer = sc->rx_buffer_area;
	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
					  MCLBYTES, 0, BUS_DMA_NOWAIT,
					  &rx_buffer->map);
		if (error != 0) {
			printf("%s: ixgb_allocate_receive_structures: "
			       "bus_dmamap_create failed; error %d\n",
			       sc->sc_dv.dv_xname, error);
			goto fail;
		}
	}

	for (i = 0; i < sc->num_rx_desc; i++) {
		error = ixgb_get_buf(sc, i, NULL);
		if (error != 0)
			goto fail;
	}
	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
	    sc->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail:
	ixgb_free_receive_structures(sc);
	return (error);
}

/*********************************************************************
 *
 *  Allocate and initialize receive structures.
 *
 **********************************************************************/
int
ixgb_setup_receive_structures(struct ixgb_softc *sc)
{
	bzero((void *)sc->rx_desc_base,
	      (sizeof(struct ixgb_rx_desc)) * sc->num_rx_desc);

	if (ixgb_allocate_receive_structures(sc))
		return (ENOMEM);

	/* Setup our descriptor pointers */
	sc->next_rx_desc_to_check = 0;
	sc->next_rx_desc_to_use = 0;
	return (0);
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
void
ixgb_initialize_receive_unit(struct ixgb_softc *sc)
{
	u_int32_t       reg_rctl;
	u_int32_t       reg_rxcsum;
	u_int32_t       reg_rxdctl;
	u_int64_t       bus_addr;

	/*
	 * Make sure receives are disabled while setting up the descriptor
	 * ring
	 */
	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

	/* Set the Receive Delay Timer Register */
	IXGB_WRITE_REG(&sc->hw, RDTR, sc->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
	IXGB_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
	IXGB_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	IXGB_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
		       sizeof(struct ixgb_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(&sc->hw, RDH, 0);

	IXGB_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);

	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(&sc->hw, RXDCTL, reg_rxdctl);

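	/*
	 * Program RAIDC to coalesce receive interrupts: gate them on the
	 * RXT0 timer, use the configured rx_int_delay, and poll at a
	 * threshold of roughly 1/16th of the ring (masked to the 6-bit
	 * field).
	 */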
	sc->raidc = 1;
	if (sc->raidc) {
		uint32_t        raidc;
		uint8_t         poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

		poll_threshold = ((sc->num_rx_desc - 1) >> 3);
		poll_threshold >>= 1;
		poll_threshold &= 0x3F;
		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
			(IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
			(sc->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
			poll_threshold;
		IXGB_WRITE_REG(&sc->hw, RAIDC, raidc);
	}

	/* Enable Receive Checksum Offload for TCP and UDP */
	reg_rxcsum = IXGB_READ_REG(&sc->hw, RXCSUM);
	reg_rxcsum |= IXGB_RXCSUM_TUOFL;
	IXGB_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);

	/* Setup the Receive Control Register */
	reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
		IXGB_RCTL_CFF |
		(sc->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	switch (sc->rx_buffer_len) {
	default:
	case IXGB_RXBUFFER_2048:
		reg_rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		reg_rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		reg_rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		reg_rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}

	reg_rctl |= IXGB_RCTL_RXEN;

	/* Enable Receives */
	IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
}

/*********************************************************************
 *
 *  Free receive related data structures.
 *
 **********************************************************************/
void
ixgb_free_receive_structures(struct ixgb_softc *sc)
{
	struct ixgb_buffer *rx_buffer;
	int             i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (sc->rx_buffer_area != NULL) {
		rx_buffer = sc->rx_buffer_area;
		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->map != NULL &&
			    rx_buffer->map->dm_nsegs > 0) {
				bus_dmamap_sync(sc->rxtag, rx_buffer->map,
				    0, rx_buffer->map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->rxtag,
				    rx_buffer->map);
			}
			if (rx_buffer->m_head != NULL) {
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			}
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(sc->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}
	if (sc->rx_buffer_area != NULL) {
		free(sc->rx_buffer_area, M_DEVBUF);
		sc->rx_buffer_area = NULL;
	}
	if (sc->rxtag != NULL)
		sc->rxtag = NULL;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and passes data which has been
 *  DMA'ed into host memory up to the upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *********************************************************************/
void
ixgb_rxeof(struct ixgb_softc *sc, int count)
{
	struct ifnet   *ifp;
	struct mbuf    *mp;
	int             eop = 0;
	int             len;
	u_int8_t        accept_frame = 0;
	int             i;
	int             next_to_use = 0;
	int             eop_desc;

	/* Pointer to the receive descriptor being examined. */
	struct ixgb_rx_desc *current_desc;

	ifp = &sc->interface_data.ac_if;
	i = sc->next_rx_desc_to_check;
	next_to_use = sc->next_rx_desc_to_use;
	eop_desc = sc->next_rx_desc_to_check;
	current_desc = &sc->rx_desc_base[i];
	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
	    sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD))
		return;

	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) &&
		    (count != 0) &&
		    (ifp->if_flags & IFF_RUNNING)) {

		mp = sc->rx_buffer_area[i].m_head;
		bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
		    0, sc->rx_buffer_area[i].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);

		accept_frame = 1;
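		/*
		 * 'count' limits complete packets rather than
		 * descriptors, so it is only decremented on EOP.
		 */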
1773 		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
1774 			count--;
1775 			eop = 1;
1776 		} else {
1777 			eop = 0;
1778 		}
1779 		len = current_desc->length;
1780 
1781 		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
1782 			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
1783 					    IXGB_RX_DESC_ERRORS_RXE))
1784 			accept_frame = 0;
1785 		if (accept_frame) {
1786 
1787 			/* Assign correct length to the current fragment */
1788 			mp->m_len = len;
1789 
1790 			if (sc->fmp == NULL) {
1791 				mp->m_pkthdr.len = len;
1792 				sc->fmp = mp;	/* Store the first mbuf */
1793 				sc->lmp = mp;
1794 			} else {
1795 				/* Chain mbufs together */
1796 				mp->m_flags &= ~M_PKTHDR;
1797 				sc->lmp->m_next = mp;
1798 				sc->lmp = sc->lmp->m_next;
1799 				sc->fmp->m_pkthdr.len += len;
1800 			}
1801 
1802 			if (eop) {
1803 				eop_desc = i;
1804 				sc->fmp->m_pkthdr.rcvif = ifp;
1805 				ifp->if_ipackets++;
1806 				ixgb_receive_checksum(sc, current_desc, sc->fmp);
1807 
1808 #if NVLAN > 0
1809 				if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
1810 					sc->fmp->m_pkthdr.ether_vtag =
1811 					    current_desc->special;
1812 					sc->fmp->m_flags |= M_VLANTAG;
1813 				}
1814 #endif
1815 
1816 #if NBPFILTER > 0
1817 				/*
1818 				 * Handle BPF listeners. Let the BPF
1819 				 * user see the packet.
1820 				 */
1821 				if (ifp->if_bpf)
1822 					bpf_mtap_ether(ifp->if_bpf, sc->fmp,
1823 					    BPF_DIRECTION_IN);
1824 #endif
1825 
1826 				ether_input_mbuf(ifp, sc->fmp);
1827 				sc->fmp = NULL;
1828 				sc->lmp = NULL;
1829 			}
1830 			sc->rx_buffer_area[i].m_head = NULL;
1831 		} else {
1832 			sc->dropped_pkts++;
1833 			if (sc->fmp != NULL)
1834 				m_freem(sc->fmp);
1835 			sc->fmp = NULL;
1836 			sc->lmp = NULL;
1837 		}
1838 
1839 		/* Zero out the receive descriptor's status */
1840 		current_desc->status = 0;
1841 		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
1842 		    sc->rxdma.dma_map->dm_mapsize,
1843 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1844 
1845 		/* Advance our pointers to the next descriptor */
1846 		if (++i == sc->num_rx_desc) {
1847 			i = 0;
1848 			current_desc = sc->rx_desc_base;
1849 		} else
1850 			current_desc++;
1851 	}
1852 	sc->next_rx_desc_to_check = i;
1853 
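	/*
	 * Step i back to the last descriptor actually examined so it can
	 * be compared against the last EOP descriptor below.
	 */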
1854 	if (--i < 0)
1855 		i = (sc->num_rx_desc - 1);
1856 
1857 	/*
1858 	 * 82597EX: Workaround for redundant write back in the receive
1859 	 * descriptor ring (causes memory corruption). Avoid re-submitting
1860 	 * the most recently received RX descriptor back to hardware.
1861 	 *
1862 	 * if (last written back descriptor == EOP bit set descriptor)
1863 	 *	then avoid re-submitting the most recently received RX
1864 	 *	descriptor back to hardware.
1865 	 * if (last written back descriptor != EOP bit set descriptor)
1866 	 *	then avoid re-submitting the most recently received RX
1867 	 *	descriptors up to the last EOP bit set descriptor.
1868 	 */
1869 	if (eop_desc != i) {
1870 		if (++eop_desc == sc->num_rx_desc)
1871 			eop_desc = 0;
1872 		i = eop_desc;
1873 	}
1874 	/* Replenish the descriptors with new mbufs up to the last EOP bit set descriptor */
1875 	while (next_to_use != i) {
1876 		current_desc = &sc->rx_desc_base[next_to_use];
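		/*
		 * Recycle the old mbuf on descriptors that completed with
		 * errors; otherwise attach a freshly allocated mbuf.
		 */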
1877 		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
1878 			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
1879 					     IXGB_RX_DESC_ERRORS_RXE))) {
1880 			mp = sc->rx_buffer_area[next_to_use].m_head;
1881 			ixgb_get_buf(sc, next_to_use, mp);
1882 		} else {
1883 			if (ixgb_get_buf(sc, next_to_use, NULL) == ENOBUFS)
1884 				break;
1885 		}
1886 		/* Advance our pointers to the next descriptor */
1887 		if (++next_to_use == sc->num_rx_desc)
1888 			next_to_use = 0;
1889 	}
1890 	sc->next_rx_desc_to_use = next_to_use;
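	/*
	 * RDT is written with the index of the last descriptor handed
	 * back to the hardware, one behind the next slot software uses.
	 */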
1891 	if (--next_to_use < 0)
1892 		next_to_use = (sc->num_rx_desc - 1);
1893 	/* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
1894 	IXGB_WRITE_REG(&sc->hw, RDT, next_to_use);
1895 }
1896 
1897 /*********************************************************************
1898  *
1899  *  Verify that the hardware indicated that the checksum is valid.
1900  *  Inform the stack about the status of checksum so that stack
1901  *  Inform the stack about the status of the checksum so that the
1902  *  stack doesn't spend time verifying it.
1903  *********************************************************************/
1904 void
1905 ixgb_receive_checksum(struct ixgb_softc *sc,
1906 		      struct ixgb_rx_desc *rx_desc,
1907 		      struct mbuf *mp)
1908 {
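	/*
	 * IXSM set means the hardware skipped checksum evaluation for
	 * this packet, so report nothing to the stack.
	 */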
1909 	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
1910 		mp->m_pkthdr.csum_flags = 0;
1911 		return;
1912 	}
1913 
1914 	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
1915 		/* Did it pass? */
1916 		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
1917 			/* IP Checksum Good */
1918 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
1919 
1920 		} else {
1921 			mp->m_pkthdr.csum_flags = 0;
1922 		}
1923 	}
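	/*
	 * The hardware reports a single TCP/UDP checksum status, so set
	 * both flags and let the stack use whichever protocol applies.
	 */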
1924 	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
1925 		/* Did it pass? */
1926 		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
1927 			mp->m_pkthdr.csum_flags |=
1928 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1929 		}
1930 	}
1931 }
1932 
1933 /*
1934  * This turns on the hardware offload of VLAN
1935  * tag insertion and stripping.
1936  */
1937 void
1938 ixgb_enable_hw_vlans(struct ixgb_softc *sc)
1939 {
1940 	uint32_t ctrl;
1941 
1942 	ctrl = IXGB_READ_REG(&sc->hw, CTRL0);
1943 	ctrl |= IXGB_CTRL0_VME;
1944 	IXGB_WRITE_REG(&sc->hw, CTRL0, ctrl);
1945 }
1946 
1947 void
1948 ixgb_enable_intr(struct ixgb_softc *sc)
1949 {
1950 	uint32_t val;
1951 
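	/*
	 * Unmask receiver timer, transmit descriptor write-back, receive
	 * descriptor minimum threshold, link status change and receiver
	 * overrun interrupts; Sun-badged boards also take one on GPI0.
	 */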
1952 	val = IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 |
1953 	      IXGB_INT_LSC | IXGB_INT_RXO;
1954 	if (sc->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
1955 		val |= IXGB_INT_GPI0;
1956 	IXGB_WRITE_REG(&sc->hw, IMS, val);
1957 }
1958 
1959 void
1960 ixgb_disable_intr(struct ixgb_softc *sc)
1961 {
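	/* Writing all ones to IMC masks every interrupt cause at once. */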
1962 	IXGB_WRITE_REG(&sc->hw, IMC, ~0);
1963 }
1964 
1965 void
1966 ixgb_write_pci_cfg(struct ixgb_hw *hw,
1967 		   uint32_t reg,
1968 		   uint16_t *value)
1969 {
1970 	struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
1971 	pci_chipset_tag_t pc = pa->pa_pc;
1972 	/* XXX: pci_conf_write() is 32 bits wide but *value is 16; read/mask/write? */
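	/*
	 * A 16-bit-safe variant would read the containing dword, merge in
	 * the requested half and write it back; a sketch (untested):
	 *
	 *	u_int32_t dword = pci_conf_read(pc, pa->pa_tag, reg & ~3);
	 *	dword &= ~(0xffffU << ((reg & 2) * 8));
	 *	dword |= (u_int32_t)*value << ((reg & 2) * 8);
	 *	pci_conf_write(pc, pa->pa_tag, reg & ~3, dword);
	 */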
1973 	pci_conf_write(pc, pa->pa_tag, reg, *value);
1974 }
1975 
1976 /**********************************************************************
1977  *
1978  *  Update the board statistics counters.
1979  *
1980  **********************************************************************/
1981 void
1982 ixgb_update_stats_counters(struct ixgb_softc *sc)
1983 {
1984 	struct ifnet   *ifp;
1985 
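	/*
	 * Registers suffixed L/H are the low and high halves of the
	 * chip's 64-bit statistics counters; accumulate each half here.
	 */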
1986 	sc->stats.crcerrs += IXGB_READ_REG(&sc->hw, CRCERRS);
1987 	sc->stats.gprcl += IXGB_READ_REG(&sc->hw, GPRCL);
1988 	sc->stats.gprch += IXGB_READ_REG(&sc->hw, GPRCH);
1989 	sc->stats.gorcl += IXGB_READ_REG(&sc->hw, GORCL);
1990 	sc->stats.gorch += IXGB_READ_REG(&sc->hw, GORCH);
1991 	sc->stats.bprcl += IXGB_READ_REG(&sc->hw, BPRCL);
1992 	sc->stats.bprch += IXGB_READ_REG(&sc->hw, BPRCH);
1993 	sc->stats.mprcl += IXGB_READ_REG(&sc->hw, MPRCL);
1994 	sc->stats.mprch += IXGB_READ_REG(&sc->hw, MPRCH);
1995 	sc->stats.roc += IXGB_READ_REG(&sc->hw, ROC);
1996 
1997 	sc->stats.mpc += IXGB_READ_REG(&sc->hw, MPC);
1998 	sc->stats.dc += IXGB_READ_REG(&sc->hw, DC);
1999 	sc->stats.rlec += IXGB_READ_REG(&sc->hw, RLEC);
2000 	sc->stats.xonrxc += IXGB_READ_REG(&sc->hw, XONRXC);
2001 	sc->stats.xontxc += IXGB_READ_REG(&sc->hw, XONTXC);
2002 	sc->stats.xoffrxc += IXGB_READ_REG(&sc->hw, XOFFRXC);
2003 	sc->stats.xofftxc += IXGB_READ_REG(&sc->hw, XOFFTXC);
2004 	sc->stats.gptcl += IXGB_READ_REG(&sc->hw, GPTCL);
2005 	sc->stats.gptch += IXGB_READ_REG(&sc->hw, GPTCH);
2006 	sc->stats.gotcl += IXGB_READ_REG(&sc->hw, GOTCL);
2007 	sc->stats.gotch += IXGB_READ_REG(&sc->hw, GOTCH);
2008 	sc->stats.ruc += IXGB_READ_REG(&sc->hw, RUC);
2009 	sc->stats.rfc += IXGB_READ_REG(&sc->hw, RFC);
2010 	sc->stats.rjc += IXGB_READ_REG(&sc->hw, RJC);
2011 	sc->stats.torl += IXGB_READ_REG(&sc->hw, TORL);
2012 	sc->stats.torh += IXGB_READ_REG(&sc->hw, TORH);
2013 	sc->stats.totl += IXGB_READ_REG(&sc->hw, TOTL);
2014 	sc->stats.toth += IXGB_READ_REG(&sc->hw, TOTH);
2015 	sc->stats.tprl += IXGB_READ_REG(&sc->hw, TPRL);
2016 	sc->stats.tprh += IXGB_READ_REG(&sc->hw, TPRH);
2017 	sc->stats.tptl += IXGB_READ_REG(&sc->hw, TPTL);
2018 	sc->stats.tpth += IXGB_READ_REG(&sc->hw, TPTH);
2019 	sc->stats.plt64c += IXGB_READ_REG(&sc->hw, PLT64C);
2020 	sc->stats.mptcl += IXGB_READ_REG(&sc->hw, MPTCL);
2021 	sc->stats.mptch += IXGB_READ_REG(&sc->hw, MPTCH);
2022 	sc->stats.bptcl += IXGB_READ_REG(&sc->hw, BPTCL);
2023 	sc->stats.bptch += IXGB_READ_REG(&sc->hw, BPTCH);
2024 
2025 	sc->stats.uprcl += IXGB_READ_REG(&sc->hw, UPRCL);
2026 	sc->stats.uprch += IXGB_READ_REG(&sc->hw, UPRCH);
2027 	sc->stats.vprcl += IXGB_READ_REG(&sc->hw, VPRCL);
2028 	sc->stats.vprch += IXGB_READ_REG(&sc->hw, VPRCH);
2029 	sc->stats.jprcl += IXGB_READ_REG(&sc->hw, JPRCL);
2030 	sc->stats.jprch += IXGB_READ_REG(&sc->hw, JPRCH);
2031 	sc->stats.rnbc += IXGB_READ_REG(&sc->hw, RNBC);
2032 	sc->stats.icbc += IXGB_READ_REG(&sc->hw, ICBC);
2033 	sc->stats.ecbc += IXGB_READ_REG(&sc->hw, ECBC);
2034 	sc->stats.uptcl += IXGB_READ_REG(&sc->hw, UPTCL);
2035 	sc->stats.uptch += IXGB_READ_REG(&sc->hw, UPTCH);
2036 	sc->stats.vptcl += IXGB_READ_REG(&sc->hw, VPTCL);
2037 	sc->stats.vptch += IXGB_READ_REG(&sc->hw, VPTCH);
2038 	sc->stats.jptcl += IXGB_READ_REG(&sc->hw, JPTCL);
2039 	sc->stats.jptch += IXGB_READ_REG(&sc->hw, JPTCH);
2040 	sc->stats.tsctc += IXGB_READ_REG(&sc->hw, TSCTC);
2041 	sc->stats.tsctfc += IXGB_READ_REG(&sc->hw, TSCTFC);
2042 	sc->stats.ibic += IXGB_READ_REG(&sc->hw, IBIC);
2043 	sc->stats.lfc += IXGB_READ_REG(&sc->hw, LFC);
2044 	sc->stats.pfrc += IXGB_READ_REG(&sc->hw, PFRC);
2045 	sc->stats.pftc += IXGB_READ_REG(&sc->hw, PFTC);
2046 	sc->stats.mcfrc += IXGB_READ_REG(&sc->hw, MCFRC);
2047 
2048 	ifp = &sc->interface_data.ac_if;
2049 
2050 	/* Fill out the OS statistics structure */
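	/* 10GbE links are full-duplex only, so collisions cannot occur. */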
2051 	ifp->if_collisions = 0;
2052 
2053 	/* Rx Errors */
2054 	ifp->if_ierrors =
2055 		sc->dropped_pkts +
2056 		sc->stats.crcerrs +
2057 		sc->stats.rnbc +
2058 		sc->stats.mpc +
2059 		sc->stats.rlec;
2060 
2061 	/* Tx Errors */
2062 	ifp->if_oerrors =
2063 		sc->watchdog_events;
2064 }
2065 
2066 #ifdef IXGB_DEBUG
2067 /**********************************************************************
2068  *
2069  *  This routine is called only when ixgb_display_debug_stats is enabled.
2070  *  This routine provides a way to take a look at important statistics
2071  *  maintained by the driver and hardware.
2072  *
2073  **********************************************************************/
2074 void
2075 ixgb_print_hw_stats(struct ixgb_softc *sc)
2076 {
2077 	char            buf_speed[100], buf_type[100];
2078 	ixgb_bus_speed  bus_speed;
2079 	ixgb_bus_type   bus_type;
2080 	const char * const unit = sc->sc_dv.dv_xname;
2081 
2082 	bus_speed = sc->hw.bus.speed;
2083 	bus_type = sc->hw.bus.type;
2084 	snprintf(buf_speed, sizeof(buf_speed), "%s",
2085 		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
2086 		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
2087 		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
2088 		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
2089 		"UNKNOWN");
2090 	printf("%s: PCI_Bus_Speed = %s\n", unit,
2091 		buf_speed);
2092 
2093 	snprintf(buf_type, sizeof(buf_type), "%s",
2094 		bus_type == ixgb_bus_type_pci ? "PCI" :
2095 		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
2096 		"UNKNOWN");
2097 	printf("%s: PCI_Bus_Type = %s\n", unit,
2098 		buf_type);
2099 
2100 	printf("%s: Tx Descriptors not Avail1 = %ld\n", unit,
2101 		sc->no_tx_desc_avail1);
2102 	printf("%s: Tx Descriptors not Avail2 = %ld\n", unit,
2103 		sc->no_tx_desc_avail2);
2104 	printf("%s: Std Mbuf Failed = %ld\n", unit,
2105 		sc->mbuf_alloc_failed);
2106 	printf("%s: Std Cluster Failed = %ld\n", unit,
2107 		sc->mbuf_cluster_failed);
2108 
2109 	printf("%s: Defer count = %lld\n", unit,
2110 		(long long)sc->stats.dc);
2111 	printf("%s: Missed Packets = %lld\n", unit,
2112 		(long long)sc->stats.mpc);
2113 	printf("%s: Receive No Buffers = %lld\n", unit,
2114 		(long long)sc->stats.rnbc);
2115 	printf("%s: Receive length errors = %lld\n", unit,
2116 		(long long)sc->stats.rlec);
2117 	printf("%s: CRC errors = %lld\n", unit,
2118 		(long long)sc->stats.crcerrs);
2119 	printf("%s: Driver dropped packets = %ld\n", unit,
2120 		sc->dropped_pkts);
2121 
2122 	printf("%s: XON Rcvd = %lld\n", unit,
2123 		(long long)sc->stats.xonrxc);
2124 	printf("%s: XON Xmtd = %lld\n", unit,
2125 		(long long)sc->stats.xontxc);
2126 	printf("%s: XOFF Rcvd = %lld\n", unit,
2127 		(long long)sc->stats.xoffrxc);
2128 	printf("%s: XOFF Xmtd = %lld\n", unit,
2129 		(long long)sc->stats.xofftxc);
2130 
2131 	printf("%s: Good Packets Rcvd = %lld\n", unit,
2132 		(long long)sc->stats.gprcl);
2133 	printf("%s: Good Packets Xmtd = %lld\n", unit,
2134 		(long long)sc->stats.gptcl);
2135 
2136 	printf("%s: Jumbo frames Rcvd = %lld\n", unit,
2137 		(long long)sc->stats.jprcl);
2138 	printf("%s: Jumbo frames Xmtd = %lld\n", unit,
2139 		(long long)sc->stats.jptcl);
2140 }
2141 #endif
2142