xref: /openbsd-src/sys/dev/pci/if_em.c (revision db3296cf5c1dd9058ceecc3a29fe4aaa0bd26000)
1 /**************************************************************************
2 
3 Copyright (c) 2001-2003, Intel Corporation
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15 
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19 
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31 
32 ***************************************************************************/
33 
34 /*$FreeBSD: if_em.c,v 1.26 2003/06/05 17:51:37 pdeuskar Exp $*/
35 /* $OpenBSD: if_em.c,v 1.10 2003/06/29 21:42:53 avsm Exp $ */
36 
37 #include "bpfilter.h"
38 #include "vlan.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/sockio.h>
43 #include <sys/mbuf.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/device.h>
47 #include <sys/socket.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 
53 #ifdef INET
54 #include <netinet/in.h>
55 #include <netinet/in_systm.h>
56 #include <netinet/in_var.h>
57 #include <netinet/ip.h>
58 #include <netinet/if_ether.h>
59 #endif
60 
61 #if NVLAN > 0
62 #include <net/if_types.h>
63 #include <net/if_vlan_var.h>
64 #endif
65 
66 #if NBPFILTER > 0
67 #include <net/bpf.h>
68 #endif
69 
70 #include <uvm/uvm_extern.h>
71 
72 #include <dev/pci/pcireg.h>
73 #include <dev/pci/pcivar.h>
74 #include <dev/pci/pcidevs.h>
75 
76 #include <dev/pci/if_em.h>
77 
78 #ifdef DEBUG
/* Debug-only assertion; "msg" must be a parenthesized panic() argument list. */
79 #define EM_KASSERT(exp,msg)        do { if (!(exp)) panic msg; } while (0)
80 #else
/* Compiled out in non-DEBUG kernels: "exp" is NOT evaluated. */
81 #define EM_KASSERT(exp,msg)
82 #endif
83 
84 /*********************************************************************
85  *  Set this to one to display debug statistics
86  *********************************************************************/
87 int             em_display_debug_stats = 0;
88 
89 /*********************************************************************
90  *  Linked list of board private structures for all NICs found
91  *********************************************************************/
92 
/* Head of the doubly-linked softc list; em_attach() inserts at the head. */
93 struct em_softc *em_adapter_list = NULL;
94 
95 
96 /*********************************************************************
97  *  Driver version
98  *********************************************************************/
99 
100 char em_driver_version[] = "1.6.6";
101 
102 
103 /*********************************************************************
104  *  PCI Device ID Table
105  *********************************************************************/
/*
 * PCI vendor/product IDs accepted by em_probe(); one entry per
 * supported Intel 8254x part.
 */
106 const struct pci_matchid em_devices[] = {
107 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542 },
108 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_SC },
109 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC },
110 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI },
111 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_SC },
112 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC },
113 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM },
114 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM },
115 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM },
116 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB },
117 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_SC },
118 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_SC },
119 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI },
120 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM },
121 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM },
122 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP },
123 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EP },
124 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI },
125 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD },
126 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP }
127 };
128 
129 /*********************************************************************
130  *  Function prototypes
131  *********************************************************************/
/* Autoconf match/attach entry points. */
132 int  em_probe(struct device *, void *, void *);
133 void em_attach(struct device *, struct device *, void *);
134 
/* Detach/shutdown are FreeBSD leftovers, not wired up on OpenBSD. */
135 #if 0
136 int  em_detach(void *);
137 int  em_shutdown(void *);
138 #endif
/* ifnet entry points and interrupt handler. */
139 int  em_intr(void *);
140 void em_start(struct ifnet *);
141 int  em_ioctl(struct ifnet *, u_long, caddr_t);
142 void em_watchdog(struct ifnet *);
143 void em_init(void *);
144 void em_stop(void *);
145 void em_media_status(struct ifnet *, struct ifmediareq *);
146 int  em_media_change(struct ifnet *);
/* Hardware bring-up, ring management and statistics helpers. */
147 void em_identify_hardware(struct em_softc *);
148 int  em_allocate_pci_resources(struct em_softc *);
149 void em_free_pci_resources(struct em_softc *);
150 void em_local_timer(void *);
151 int  em_hardware_init(struct em_softc *);
152 void em_setup_interface(struct em_softc *);
153 int  em_setup_transmit_structures(struct em_softc *);
154 void em_initialize_transmit_unit(struct em_softc *);
155 int  em_setup_receive_structures(struct em_softc *);
156 void em_initialize_receive_unit(struct em_softc *);
157 void em_enable_intr(struct em_softc *);
158 void em_disable_intr(struct em_softc *);
159 void em_free_transmit_structures(struct em_softc *);
160 void em_free_receive_structures(struct em_softc *);
161 void em_update_stats_counters(struct em_softc *);
162 void em_clean_transmit_interrupts(struct em_softc *);
163 int  em_allocate_receive_structures(struct em_softc *);
164 int  em_allocate_transmit_structures(struct em_softc *);
165 void em_process_receive_interrupts(struct em_softc *, int);
166 void em_receive_checksum(struct em_softc *,
167 				     struct em_rx_desc *,
168 				     struct mbuf *);
169 void em_transmit_checksum_setup(struct em_softc *,
170 					    struct mbuf *,
171 					    u_int32_t *,
172 					    u_int32_t *);
173 void em_set_promisc(struct em_softc *);
174 void em_disable_promisc(struct em_softc *);
175 void em_set_multi(struct em_softc *);
176 void em_print_hw_stats(struct em_softc *);
177 void em_print_link_status(struct em_softc *);
178 int  em_get_buf(int i, struct em_softc *,
179 			    struct mbuf *);
180 void em_enable_vlans(struct em_softc *);
181 int  em_encap(struct em_softc *, struct mbuf *);
182 void em_smartspeed(struct em_softc *);
/* 82547 TX FIFO workaround and DMA memory helpers. */
183 int  em_82547_fifo_workaround(struct em_softc *, int);
184 void em_82547_update_fifo_head(struct em_softc *, int);
185 int  em_82547_tx_fifo_reset(struct em_softc *);
186 void em_82547_move_tail(void *);
187 int  em_dma_malloc(struct em_softc *, bus_size_t,
188     struct em_dma_alloc *, int);
189 void em_dma_free(struct em_softc *, struct em_dma_alloc *);
190 void em_print_debug_info(struct em_softc *);
191 int  em_is_valid_ether_addr(u_int8_t *);
192 
193 /*********************************************************************
194  *  FreeBSD Device Interface Entry Points
195  *********************************************************************/
196 
/* Autoconf attachment: softc size, match and attach routines. */
197 struct cfattach em_ca = {
198 	sizeof(struct em_softc), em_probe, em_attach
199 };
200 
/* Driver definition: device name "em", network-interface class. */
201 struct cfdriver em_cd = {
202 	0, "em", DV_IFNET
203 };
204 
205 /*********************************************************************
206  *  Device identification routine
207  *
208  *  em_probe determines if the driver should be loaded on
209  *  adapter based on PCI vendor/device id of the adapter.
210  *
211  *  return 0 on success, positive on failure
212  *********************************************************************/
213 
214 int
215 em_probe(struct device *parent, void *match, void *aux)
216 {
217 	INIT_DEBUGOUT("em_probe: begin");
218 
219 	return (pci_matchbyid((struct pci_attach_args *)aux, em_devices,
220 	    sizeof(em_devices)/sizeof(em_devices[0])));
221 }
222 
223 /*********************************************************************
224  *  Device initialization routine
225  *
226  *  The attach entry point is called when the driver is being loaded.
227  *  This routine identifies the type of hardware, allocates all resources
228  *  and initializes the hardware.
229  *
230  *  return 0 on success, positive on failure
231  *********************************************************************/
232 
233 void
234 em_attach(struct device *parent, struct device *self, void *aux)
235 {
236 	struct pci_attach_args *pa = aux;
237 #if 0
238 	pci_chipset_tag_t pc = pa->pa_pc;
239 #endif
240 	struct em_softc *sc = (struct em_softc *)self;
241 	int		s;
242 	int		tsize, rsize;
	/* NOTE(review): "error" is only consumed by the FreeBSD code paths;
	 * OpenBSD attach returns void, so on failure it is set but unused. */
243 	int		error = 0;
244 
245 	INIT_DEBUGOUT("em_attach: begin");
246 	s = splimp();
247 
248 #ifdef __FreeBSD__
249 	/* Allocate, clear, and link in our sc structure */
250 	if (!(sc = device_get_softc(dev))) {
251 		printf("em: sc structure allocation failed\n");
252 		splx(s);
253 		return(ENOMEM);
254 	}
255 	bzero(sc, sizeof(struct em_softc ));
256 	sc->dev = dev;
257 	sc->osdep.dev = dev;
258 	sc->sc_dv.dv_xname = device_get_unit(dev);
259 #endif /* __FreeBSD__ */
260 
261 	sc->osdep.em_pa = *pa;
262 
	/* Insert this softc at the head of the global adapter list. */
263 	if (em_adapter_list != NULL)
264 		em_adapter_list->prev = sc;
265 	sc->next = em_adapter_list;
266 	em_adapter_list = sc;
267 
268 #ifdef __FreeBSD__
269 	/* SYSCTL stuff */
270 	sysctl_ctx_init(&sc->sysctl_ctx);
271 	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
272 					       SYSCTL_STATIC_CHILDREN(_hw),
273 					       OID_AUTO,
274 					       device_get_nameunit(dev),
275 					       CTLFLAG_RD,
276 					       0, "");
277 	if (sc->sysctl_tree == NULL) {
278 		error = EIO;
279 		goto err_sysctl;
280 	}
281 
282 	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
283 			SYSCTL_CHILDREN(sc->sysctl_tree),
284 			OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
285 			(void *)sc, 0,
286 			em_sysctl_debug_info, "I", "Debug Information");
287 
288 	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
289 			SYSCTL_CHILDREN(sc->sysctl_tree),
290 			OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
291 			(void *)sc, 0,
292 			em_sysctl_stats, "I", "Statistics");
293 
294 	callout_handle_init(&sc->timer_handle);
295 	callout_handle_init(&sc->tx_fifo_timer_handle);
296 #endif /* __FreeBSD__ */
297 
	/* Stats/link timer and the 82547 TX FIFO workaround timer. */
298 	timeout_set(&sc->timer_handle, em_local_timer, sc);
299 	timeout_set(&sc->tx_fifo_timer_handle, em_82547_move_tail, sc);
300 
301 	/* Determine hardware revision */
302 	em_identify_hardware(sc);
303 
304 	/* Parameters (to be read from user) */
305 	sc->num_tx_desc = EM_MAX_TXD;
306 	sc->num_rx_desc = EM_MAX_RXD;
307 	sc->tx_int_delay = EM_TIDV;
308 	sc->tx_abs_int_delay = EM_TADV;
309 	sc->rx_int_delay = EM_RDTR;
310 	sc->rx_abs_int_delay = EM_RADV;
311 	sc->hw.autoneg = DO_AUTO_NEG;
312 	sc->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
313 	sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
314 	sc->hw.tbi_compatibility_en = TRUE;
315 	sc->rx_buffer_len = EM_RXBUFFER_2048;
316 
317 	/*
318 	 * These parameters control the automatic generation(Tx) and
319 	 * response(Rx) to Ethernet PAUSE frames.
320 	 */
321 	sc->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
322 	sc->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
323 	sc->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
324 	sc->hw.fc_send_xon   = TRUE;
325 	sc->hw.fc = em_fc_full;
326 
327 	sc->hw.phy_init_script = 1;
328 
329 	/*
330 	 * Set the max frame size assuming standard ethernet
331 	 * sized frames
332 	 */
333 	sc->hw.max_frame_size =
334 	    ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
335 
336 	sc->hw.min_frame_size =
337 	    MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
338 
339 	/*
340 	 * This controls when hardware reports transmit completion
341 	 * status.
342 	 */
343 	sc->hw.report_tx_early = 1;
344 
345 
346 	if (em_allocate_pci_resources(sc)) {
347 		printf("%s: Allocation of PCI resources failed\n",
348 		       sc->sc_dv.dv_xname);
349 		error = ENXIO;
350 		goto err_pci;
351 	}
352 
353 
354 	/* Initialize eeprom parameters */
355 	em_init_eeprom_params(&sc->hw);
356 
	/* Descriptor rings must be a multiple of the 4K page size. */
357 	tsize = EM_ROUNDUP(sc->num_tx_desc *
358 			   sizeof(struct em_tx_desc), 4096);
359 
360 	/* Allocate Transmit Descriptor ring */
361 	if (em_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
362 		printf("%s: Unable to allocate tx_desc memory\n",
363 		       sc->sc_dv.dv_xname);
364 		error = ENOMEM;
365 		goto err_tx_desc;
366 	}
367 	sc->tx_desc_base = (struct em_tx_desc *)sc->txdma.dma_vaddr;
368 
369 	rsize = EM_ROUNDUP(sc->num_rx_desc *
370 			   sizeof(struct em_rx_desc), 4096);
371 
372 	/* Allocate Receive Descriptor ring */
373 	if (em_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
374 		printf("%s: Unable to allocate rx_desc memory\n",
375 		       sc->sc_dv.dv_xname);
376 		error = ENOMEM;
377 		goto err_rx_desc;
378 	}
379 	sc->rx_desc_base = (struct em_rx_desc *) sc->rxdma.dma_vaddr;
380 
381 	/* Initialize the hardware */
382 	if (em_hardware_init(sc)) {
383 		printf("%s: Unable to initialize the hardware\n",
384 		       sc->sc_dv.dv_xname);
385 		error = EIO;
386 		goto err_hw_init;
387 	}
388 
389 	/* Copy the permanent MAC address out of the EEPROM */
390 	if (em_read_mac_addr(&sc->hw) < 0) {
391 		printf("%s: EEPROM read error while reading mac address\n",
392 		       sc->sc_dv.dv_xname);
393 		error = EIO;
394 		goto err_mac_addr;
395 	}
396 
397 	if (!em_is_valid_ether_addr(sc->hw.mac_addr)) {
398 		printf("%s: Invalid mac address\n", sc->sc_dv.dv_xname);
399 		error = EIO;
400 		goto err_mac_addr;
401 	}
402 
403 	bcopy(sc->hw.mac_addr, sc->interface_data.ac_enaddr,
404 	      ETHER_ADDR_LEN);
405 
406 	printf(", address: %s\n", ether_sprintf(sc->interface_data.ac_enaddr));
407 
408 	/* Setup OS specific network interface */
409 	em_setup_interface(sc);
410 
411 	/* Initialize statistics */
412 	em_clear_hw_cntrs(&sc->hw);
413 	em_update_stats_counters(sc);
414 	sc->hw.get_link_status = 1;
415 	em_check_for_link(&sc->hw);
416 
417 	/* Print the link status */
418 	if (sc->link_active == 1) {
419 		em_get_speed_and_duplex(&sc->hw, &sc->link_speed,
420 					&sc->link_duplex);
421 	}
422 
423 	INIT_DEBUGOUT("em_attach: end");
424 	splx(s);
425 	return;
426 
	/*
	 * Error unwind: each later failure falls through to free every
	 * resource acquired before it (rx ring, then tx ring, then PCI).
	 */
427 err_mac_addr:
428 err_hw_init:
429 	em_dma_free(sc, &sc->rxdma);
430 err_rx_desc:
431 	em_dma_free(sc, &sc->txdma);
432 err_tx_desc:
433 err_pci:
434 	em_free_pci_resources(sc);
435 #ifdef __FreeBSD__
436 	sysctl_ctx_free(&sc->sysctl_ctx);
437 #endif /* __FreeBSD__ */
438 /*err_sysctl:*/
439 	splx(s);
440 	return;
441 
442 }
443 
444 /*********************************************************************
445  *  Device removal routine
446  *
447  *  The detach entry point is called when the driver is being removed.
448  *  This routine stops the adapter and deallocates all the resources
449  *  that were allocated for driver operation.
450  *
451  *  return 0 on success, positive on failure
452  *********************************************************************/
453 #ifdef __FreeBSD__
/*
 * FreeBSD-only detach: stop the adapter, reset the PHY, tear down the
 * ethernet interface, release PCI resources and descriptor rings, and
 * unlink this softc from the global adapter list.  Returns 0.
 */
454 int
455 em_detach(void* arg)
456 {
457 	struct em_softc *sc = arg;
458 	struct ifnet   *ifp = &sc->interface_data.ac_if;
459 	int		s;
460 
461 	INIT_DEBUGOUT("em_detach: begin");
462 	s = splimp();
463 
464 	em_stop(sc);
465 	em_phy_hw_reset(&sc->hw);
466 #if __FreeBSD_version < 500000
467 	ether_ifdetach(&sc->interface_data.ac_if, ETHER_BPF_SUPPORTED);
468 #else
469 	ether_ifdetach(&sc->interface_data.ac_if);
470 #endif
471 	em_free_pci_resources(sc);
472 
473 	/* Free Transmit Descriptor ring */
474 	if (sc->tx_desc_base) {
475 		em_dma_free(sc, &sc->txdma);
476 		sc->tx_desc_base = NULL;
477 	}
478 
479 	/* Free Receive Descriptor ring */
480 	if (sc->rx_desc_base) {
481 		em_dma_free(sc, &sc->rxdma);
482 		sc->rx_desc_base = NULL;
483 	}
484 
485 	/* Remove from the adapter list */
486 	if (em_adapter_list == sc)
487 		em_adapter_list = sc->next;
488 	if (sc->next != NULL)
489 		sc->next->prev = sc->prev;
490 	if (sc->prev != NULL)
491 		sc->prev->next = sc->next;
492 
493 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
494 	ifp->if_timer = 0;
495 
496 	splx(s);
497 	return(0);
498 }
499 
500 /*********************************************************************
501  *
502  *  Shutdown entry point
503  *
504  **********************************************************************/
505 
/*
 * Shutdown hook (FreeBSD only): quiesce the adapter.  Always returns 0.
 */
int
em_shutdown(void* arg)
{
	struct em_softc *sc = arg;

	em_stop(sc);
	return (0);
}
513 
514 #endif /* __FreeBSD__ */
515 
516 /*********************************************************************
517  *  Transmit entry point
518  *
519  *  em_start is called by the stack to initiate a transmit.
520  *  The driver will remain in this routine as long as there are
521  *  packets to transmit and transmit resources are available.
522  *  In case resources are not available stack is notified and
523  *  the packet is requeued.
524  **********************************************************************/
525 
526 void
527 em_start(struct ifnet *ifp)
528 {
529 	int		s;
530 	struct mbuf    *m_head;
531 	struct em_softc *sc = ifp->if_softc;
532 
533 	if (!sc->link_active)
534 		return;
535 
536 	s = splimp();
537 
538 	for (;;) {
539 		IFQ_POLL(&ifp->if_snd, m_head);
540 
541 		if (m_head == NULL) break;
542 
543 		if (em_encap(sc, m_head)) {
544 			ifp->if_flags |= IFF_OACTIVE;
545 			break;
546 		}
547 
548 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
549 
550 #if NBPFILTER > 0
551 		/* Send a copy of the frame to the BPF listener */
552 		if (ifp->if_bpf)
553 			bpf_mtap(ifp->if_bpf, m_head);
554 #endif
555 
556 		/* Set timeout in case hardware has problems transmitting */
557 		ifp->if_timer = EM_TX_TIMEOUT;
558 
559 	}
560 	splx(s);
561 	return;
562 }
563 
564 /*********************************************************************
565  *  Ioctl entry point
566  *
567  *  em_ioctl is called when the user wants to configure the
568  *  interface.
569  *
570  *  return 0 on success, positive on failure
571  **********************************************************************/
572 
573 int
574 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
575 {
576 	int		s, error = 0;
577 	struct ifreq   *ifr = (struct ifreq *) data;
578 	struct ifaddr  *ifa = (struct ifaddr *)data;
579 	struct em_softc * sc = ifp->if_softc;
580 
581 	s = splimp();
582 
	/* Common ethernet ioctls get first crack; a positive return is final. */
583 	if ((error = ether_ioctl(ifp, &sc->interface_data, command, data)) > 0) {
584 		splx(s);
585 		return (error);
586 	}
587 
588 	switch (command) {
589 	case SIOCSIFADDR:
590 #ifdef __FreeBSD__
591 	case SIOCGIFADDR:
592 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
593 		ether_ioctl(ifp, command, data);
594 		break;
595 #endif /* __FreeBSD__ */
596 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
597 			       "Addr)");
598 		ifp->if_flags |= IFF_UP;
599 		em_init(sc);
600 		switch (ifa->ifa_addr->sa_family) {
601 #ifdef INET
602 		case AF_INET:
603 			arp_ifinit(&sc->interface_data, ifa);
604 			break;
605 #endif /* INET */
606 		default:
607 			break;
608 		}
609 		break;
610 	case SIOCSIFMTU:
611 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		/* NOTE(review): only the upper bound is checked; no minimum
		 * MTU is enforced here — confirm against later revisions. */
612 		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
613 			error = EINVAL;
614 		} else {
615 			ifp->if_mtu = ifr->ifr_mtu;
616 			sc->hw.max_frame_size =
617 			ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
618 			em_init(sc);
619 		}
620 		break;
621 	case SIOCSIFFLAGS:
622 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
623 		if (ifp->if_flags & IFF_UP) {
624 			if (!(ifp->if_flags & IFF_RUNNING))
625 				em_init(sc);
626 
			/* Re-sync promiscuous state with the current flags. */
627 			em_disable_promisc(sc);
628 			em_set_promisc(sc);
629 		} else {
630 			if (ifp->if_flags & IFF_RUNNING) {
631 				em_stop(sc);
632 			}
633 		}
634 		break;
635 	case SIOCADDMULTI:
636 	case SIOCDELMULTI:
637 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
638 		error = (command == SIOCADDMULTI)
639 			? ether_addmulti(ifr, &sc->interface_data)
640 			: ether_delmulti(ifr, &sc->interface_data);
641 
		/* ENETRESET means the list changed; reprogram the filter. */
642 		if (error == ENETRESET) {
643 			if (ifp->if_flags & IFF_RUNNING) {
644 				em_disable_intr(sc);
645 				em_set_multi(sc);
646 				if (sc->hw.mac_type == em_82542_rev2_0) {
647 					em_initialize_receive_unit(sc);
648 				}
649 #ifdef DEVICE_POLLING
650 				if (!(ifp->if_ipending & IFF_POLLING))
651 #endif
652 					em_enable_intr(sc);
653 			}
654 			error = 0;
655 		}
656 		break;
657 	case SIOCSIFMEDIA:
658 	case SIOCGIFMEDIA:
659 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
660 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
661 		break;
662 #ifdef __FreeBSD__
663 	case SIOCSIFCAP:
664 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		/* NOTE(review): "mask" has no declaration in this function;
		 * this FreeBSD-only path looks stale/unbuildable — verify. */
665 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
666 		if (mask & IFCAP_HWCSUM) {
667 			if (IFCAP_HWCSUM & ifp->if_capenable)
668 				ifp->if_capenable &= ~IFCAP_HWCSUM;
669 			else
670 				ifp->if_capenable |= IFCAP_HWCSUM;
671 			if (ifp->if_flags & IFF_RUNNING)
672 				em_init(sc);
673 		}
674 		break;
675 #endif /* __FreeBSD__ */
676 	default:
677 		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%d)\n", (int)command);
678 		error = EINVAL;
679 	}
680 
681 	splx(s);
682 	return(error);
683 }
684 
685 /*********************************************************************
686  *  Watchdog entry point
687  *
688  *  This routine is called whenever hardware quits transmitting.
689  *
690  **********************************************************************/
691 
692 void
693 em_watchdog(struct ifnet *ifp)
694 {
695 	struct em_softc * sc;
696 	sc = ifp->if_softc;
697 
698 	/* If we are in this routine because of pause frames, then
699 	 * don't reset the hardware.
700 	 */
701 	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_TXOFF) {
702 		ifp->if_timer = EM_TX_TIMEOUT;
703 		return;
704 	}
705 
706 	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
707 
708 	ifp->if_flags &= ~IFF_RUNNING;
709 
710 	em_stop(sc);
711 	em_init(sc);
712 
713 	ifp->if_oerrors++;
714 	return;
715 }
716 
717 /*********************************************************************
718  *  Init entry point
719  *
720  *  This routine is used in two ways. It is used by the stack as
721  *  init entry point in network interface structure. It is also used
722  *  by the driver as a hw/sw initialization routine to get to a
723  *  consistent state.
724  *
725  *  return 0 on success, positive on failure
726  **********************************************************************/
727 
728 void
729 em_init(void *arg)
730 {
731 	int		s;
732 	struct ifnet   *ifp;
733 	struct em_softc * sc = arg;
734 
735 	INIT_DEBUGOUT("em_init: begin");
736 
737 	s = splimp();
738 
	/* Quiesce first: init always starts from a stopped adapter. */
739 	em_stop(sc);
740 
741 	/* Initialize the hardware */
742 	if (em_hardware_init(sc)) {
743 		printf("%s: Unable to initialize the hardware\n",
744 		       sc->sc_dv.dv_xname);
745 		splx(s);
746 		return;
747 	}
748 
749 	em_enable_vlans(sc);
750 
751 	/* Prepare transmit descriptors and buffers */
752 	if (em_setup_transmit_structures(sc)) {
753 		printf("%s: Could not setup transmit structures\n",
754 		       sc->sc_dv.dv_xname);
755 		em_stop(sc);
756 		splx(s);
757 		return;
758 	}
759 	em_initialize_transmit_unit(sc);
760 
761 	/* Setup Multicast table */
762 	em_set_multi(sc);
763 
764 	/* Prepare receive descriptors and buffers */
765 	if (em_setup_receive_structures(sc)) {
766 		printf("%s: Could not setup receive structures\n",
767 		       sc->sc_dv.dv_xname);
768 		em_stop(sc);
769 		splx(s);
770 		return;
771 	}
772 	em_initialize_receive_unit(sc);
773 
774 	ifp = &sc->interface_data.ac_if;
775 	ifp->if_flags |= IFF_RUNNING;
776 	ifp->if_flags &= ~IFF_OACTIVE;
777 
778 #ifdef __FreeBSD__
779 	if (sc->hw.mac_type >= em_82543) {
780 		if (ifp->if_capenable & IFCAP_TXCSUM)
781 			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
782 		else
783 			ifp->if_hwassist = 0;
784 	}
785 #endif /* __FreeBSD__ */
786 
	/* Restart the stats/link timer (2 second period). */
787 	timeout_add(&sc->timer_handle, 2*hz);
788 	em_clear_hw_cntrs(&sc->hw);
789 #ifdef DEVICE_POLLING
790         /*
791          * Only enable interrupts if we are not polling, make sure
792          * they are off otherwise.
793          */
794         if (ifp->if_ipending & IFF_POLLING)
795                 em_disable_intr(sc);
796         else
797 #endif /* DEVICE_POLLING */
798 		em_enable_intr(sc);
799 
800 	splx(s);
801 	return;
802 }
803 
804 
805 #ifdef DEVICE_POLLING
806 static poll_handler_t em_poll;
807 
/*
 * DEVICE_POLLING handler: services RX/TX without interrupts.  On
 * POLL_DEREGISTER re-enables interrupts; on POLL_AND_CHECK_STATUS also
 * checks ICR for link events.
 *
 * NOTE(review): this path uses the FreeBSD untimeout()/timeout() API on
 * sc->timer_handle, which em_attach() initialized with OpenBSD
 * timeout_set(); it looks unbuildable on OpenBSD — verify before
 * enabling DEVICE_POLLING.
 */
808 static void
809 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
810 {
811 	struct em_softc *sc = ifp->if_softc;
812 	u_int32_t reg_icr;
813 
814 	if (cmd == POLL_DEREGISTER) {	    /* final call, enable interrupts */
815 		em_enable_intr(sc);
816 		return;
817 	}
818 	if (cmd == POLL_AND_CHECK_STATUS) {
819 		reg_icr = E1000_READ_REG(&sc->hw, ICR);
820 		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			/* Link event: refresh link state, restart the timer. */
821 			untimeout(em_local_timer, sc, sc->timer_handle);
822 			sc->hw.get_link_status = 1;
823 			em_check_for_link(&sc->hw);
824 			em_print_link_status(sc);
825 			sc->timer_handle = timeout(em_local_timer, sc, 2*hz);
826 		}
827 	}
828 	if (ifp->if_flags & IFF_RUNNING) {
829 		em_process_receive_interrupts(sc, count);
830 		em_clean_transmit_interrupts(sc);
831 	}
832 
833 	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
834 		em_start(ifp);
835 }
836 #endif /* DEVICE_POLLING */
837 
838 /*********************************************************************
839  *
840  *  Interrupt Service routine
841  *
842  **********************************************************************/
843 int
844 em_intr(void *arg)
845 {
846 	u_int32_t	loop_cnt = EM_MAX_INTR;
847 	u_int32_t	reg_icr;
848 	struct ifnet	*ifp;
849 	struct em_softc *sc = arg;
850 
851 	ifp = &sc->interface_data.ac_if;
852 
853 #ifdef DEVICE_POLLING
854 	if (ifp->if_ipending & IFF_POLLING)
855 		return;
856 
857 	if (ether_poll_register(em_poll, ifp)) {
858 		em_disable_intr(sc);
859 		em_poll(ifp, 0, 1);
860 		return;
861 	}
862 #endif /* DEVICE_POLLING */
863 	reg_icr = E1000_READ_REG(&sc->hw, ICR);
864 	if (!reg_icr) {
865 		return (0);
866 	}
867 
868 	/* Link status change */
869 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
870 		timeout_del(&sc->timer_handle);
871 		sc->hw.get_link_status = 1;
872 		em_check_for_link(&sc->hw);
873 		em_print_link_status(sc);
874 		timeout_add(&sc->timer_handle, 2*hz);
875 	}
876 
877 	while (loop_cnt > 0) {
878 		if (ifp->if_flags & IFF_RUNNING) {
879 			em_process_receive_interrupts(sc, -1);
880 			em_clean_transmit_interrupts(sc);
881 		}
882 		loop_cnt--;
883 	}
884 
885 	if (ifp->if_flags & IFF_RUNNING && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
886 		em_start(ifp);
887 
888 	return (1);
889 }
890 
891 
892 
893 /*********************************************************************
894  *
895  *  Media Ioctl callback
896  *
897  *  This routine is called whenever the user queries the status of
898  *  the interface using ifconfig.
899  *
900  **********************************************************************/
901 void
902 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
903 {
904 	struct em_softc * sc= ifp->if_softc;
905 
906 	INIT_DEBUGOUT("em_media_status: begin");
907 
908 	em_check_for_link(&sc->hw);
909 	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
910 		if (sc->link_active == 0) {
911 			em_get_speed_and_duplex(&sc->hw,
912 						&sc->link_speed,
913 						&sc->link_duplex);
914 			sc->link_active = 1;
915 		}
916 	} else {
917 		if (sc->link_active == 1) {
918 			sc->link_speed = 0;
919 			sc->link_duplex = 0;
920 			sc->link_active = 0;
921 		}
922 	}
923 
924 	ifmr->ifm_status = IFM_AVALID;
925 	ifmr->ifm_active = IFM_ETHER;
926 
927 	if (!sc->link_active)
928 		return;
929 
930 	ifmr->ifm_status |= IFM_ACTIVE;
931 
932 	if (sc->hw.media_type == em_media_type_fiber) {
933 		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
934 	} else {
935 		switch (sc->link_speed) {
936 		case 10:
937 			ifmr->ifm_active |= IFM_10_T;
938 			break;
939 		case 100:
940 			ifmr->ifm_active |= IFM_100_TX;
941 			break;
942 		case 1000:
943 			ifmr->ifm_active |= IFM_1000_T;
944 			break;
945 		}
946 		if (sc->link_duplex == FULL_DUPLEX)
947 			ifmr->ifm_active |= IFM_FDX;
948 		else
949 			ifmr->ifm_active |= IFM_HDX;
950 	}
951 	return;
952 }
953 
954 /*********************************************************************
955  *
956  *  Media Ioctl callback
957  *
958  *  This routine is called when the user changes speed/duplex using
959  *  media/mediopt option with ifconfig.
960  *
961  **********************************************************************/
962 int
963 em_media_change(struct ifnet *ifp)
964 {
965 	struct em_softc * sc = ifp->if_softc;
966 	struct ifmedia	*ifm = &sc->media;
967 
968 	INIT_DEBUGOUT("em_media_change: begin");
969 
970 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
971 		return(EINVAL);
972 
973 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
974 	case IFM_AUTO:
975 		sc->hw.autoneg = DO_AUTO_NEG;
976 		sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
977 		break;
978 	case IFM_1000_SX:
979 	case IFM_1000_T:
980 		sc->hw.autoneg = DO_AUTO_NEG;
981 		sc->hw.autoneg_advertised = ADVERTISE_1000_FULL;
982 		break;
983 	case IFM_100_TX:
984 		sc->hw.autoneg = FALSE;
985 		sc->hw.autoneg_advertised = 0;
986 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
987 			sc->hw.forced_speed_duplex = em_100_full;
988 		else
989 			sc->hw.forced_speed_duplex	= em_100_half;
990 		break;
991 	case IFM_10_T:
992 		sc->hw.autoneg = FALSE;
993 		sc->hw.autoneg_advertised = 0;
994 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
995 			sc->hw.forced_speed_duplex = em_10_full;
996 		else
997 			sc->hw.forced_speed_duplex	= em_10_half;
998 		break;
999 	default:
1000 		printf("%s: Unsupported media type\n", sc->sc_dv.dv_xname);
1001 	}
1002 
1003 	em_init(sc);
1004 
1005 	return(0);
1006 }
1007 
1008 #ifdef __FreeBSD__
/*
 * bus_dmamap_load() callback (FreeBSD only): record the DMA segment
 * count and copy the segment array into the em_q for the caller to
 * build TX descriptors from.  On a mapping error nothing is recorded.
 */
1009 void
1010 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1011 {
1012 	struct em_q *q = arg;
1013 
1014 	if (error)
1015 		return;
1016 	EM_KASSERT(nsegs <= EM_MAX_SCATTER,
1017 		("Too many DMA segments returned when mapping tx packet"));
1018 	q->nsegs = nsegs;
1019 	bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1020 }
1021 #endif /* __FreeBSD__ */
1022 
/* 82547 TX FIFO workaround constants (FIFO header size, packet
 * threshold, FIFO size and base) — presumably from the 82547 errata;
 * TODO confirm against the workaround routines below. */
1023 #define EM_FIFO_HDR		 0x10
1024 #define EM_82547_PKT_THRESH	 0x3e0
1025 #define EM_82547_TX_FIFO_SIZE	 0x2800
1026 #define EM_82547_TX_FIFO_BEGIN	 0xf00
1027 /*********************************************************************
1028  *
1029  *  This routine maps the mbufs to tx descriptors.
1030  *
1031  *  return 0 on success, positive on failure
1032  **********************************************************************/
1033 int
1034 em_encap(struct em_softc *sc, struct mbuf *m_head)
1035 {
1036 	u_int32_t	txd_upper;
1037 	u_int32_t	txd_lower;
1038 	int		i, j, error;
1039 #if NVLAN > 0
1040 	struct ifvlan *ifv = NULL;
1041 #endif
1042 	struct em_q	q;
1043 
1044 	struct em_buffer   *tx_buffer = NULL;
1045 	struct em_tx_desc *current_tx_desc = NULL;
1046 	/*struct ifnet	 *ifp = &sc->interface_data.ac_if;*/
1047 
1048 	/*
1049 	 * Force a cleanup if number of TX descriptors
1050 	 * available hits the threshold
1051 	 */
1052 	if (sc->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1053 		em_clean_transmit_interrupts(sc);
1054 		if (sc->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1055 			sc->no_tx_desc_avail1++;
1056 			return (ENOBUFS);
1057 		}
1058 	}
1059 
1060 	/*
1061 	 * Map the packet for DMA.
1062 	 */
1063 	if (bus_dmamap_create(sc->txtag, MCLBYTES, 32, 0, 0, BUS_DMA_NOWAIT,
1064 	    &q.map)) {
1065 		sc->no_tx_map_avail++;
1066 		return (ENOMEM);
1067 	}
1068 	error = bus_dmamap_load_mbuf(sc->txtag, q.map,
1069 				     m_head, BUS_DMA_NOWAIT);
1070 	if (error != 0) {
1071 		sc->no_tx_dma_setup++;
1072 		bus_dmamap_destroy(sc->txtag, q.map);
1073 		return (error);
1074 	}
1075 	EM_KASSERT(q.map->dm_nsegs!= 0, ("em_encap: empty packet"));
1076 
1077 	if (q.map->dm_nsegs > sc->num_tx_desc_avail) {
1078 		sc->no_tx_desc_avail2++;
1079 		bus_dmamap_destroy(sc->txtag, q.map);
1080 		return (ENOBUFS);
1081 	}
1082 
1083 
1084 #ifdef __FreeBSD__
1085 	if (ifp->if_hwassist > 0) {
1086 		em_transmit_checksum_setup(sc,	m_head,
1087 					   &txd_upper, &txd_lower);
1088 	} else
1089 #endif /* __FreeBSD__ */
1090 		txd_upper = txd_lower = 0;
1091 
1092 
1093 	/* Find out if we are in vlan mode */
1094 #if NVLAN > 0
1095 	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1096 	    m_head->m_pkthdr.rcvif != NULL &&
1097 	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1098 		ifv = m_head->m_pkthdr.rcvif->if_softc;
1099 #endif
1100 
1101 	i = sc->next_avail_tx_desc;
1102 	for (j = 0; j < q.map->dm_nsegs; j++) {
1103 		tx_buffer = &sc->tx_buffer_area[i];
1104 		current_tx_desc = &sc->tx_desc_base[i];
1105 
1106 		current_tx_desc->buffer_addr = htole64(q.map->dm_segs[j].ds_addr);
1107 		current_tx_desc->lower.data = htole32(
1108 		    sc->txd_cmd | txd_lower | q.map->dm_segs[j].ds_len);
1109 		current_tx_desc->upper.data = htole32(txd_upper);
1110 
1111 		if (++i == sc->num_tx_desc)
1112 			i = 0;
1113 
1114 		tx_buffer->m_head = NULL;
1115 	}
1116 
1117 	sc->num_tx_desc_avail -= q.map->dm_nsegs;
1118 	sc->next_avail_tx_desc = i;
1119 
1120 #if NVLAN > 0
1121 	if (ifv != NULL) {
1122 		/* Set the vlan id */
1123 		current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1124 
1125 		/* Tell hardware to add tag */
1126 		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1127 	}
1128 #endif
1129 
1130 	tx_buffer->m_head = m_head;
1131 	tx_buffer->map = q.map;
1132 	bus_dmamap_sync(sc->txtag, q.map, 0, q.map->dm_mapsize,
1133 	    BUS_DMASYNC_PREWRITE);
1134 
1135 	/*
1136 	 * Last Descriptor of Packet needs End Of Packet (EOP)
1137 	 */
1138 	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1139 
1140 	/*
1141 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1142 	 * that this frame is available to transmit.
1143 	 */
1144 	if (sc->hw.mac_type == em_82547 &&
1145 	    sc->link_duplex == HALF_DUPLEX) {
1146 		em_82547_move_tail(sc);
1147 	} else {
1148 		E1000_WRITE_REG(&sc->hw, TDT, i);
1149 		if (sc->hw.mac_type == em_82547) {
1150 			em_82547_update_fifo_head(sc, m_head->m_pkthdr.len);
1151 		}
1152 	}
1153 
1154 	return (0);
1155 }
1156 
1157 /*********************************************************************
1158  *
1159  * 82547 workaround to avoid controller hang in half-duplex environment.
1160  * The workaround is to avoid queuing a large packet that would span
1161  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1162  * in this case. We do that only when FIFO is queiced.
1163  *
1164  **********************************************************************/
1165 void
1166 em_82547_move_tail(void *arg)
1167 {
1168 	int s;
1169 	struct em_softc *sc = arg;
1170 	uint16_t hw_tdt;
1171 	uint16_t sw_tdt;
1172 	struct em_tx_desc *tx_desc;
1173 	uint16_t length = 0;
1174 	boolean_t eop = 0;
1175 
1176 	s = splimp();
1177 	hw_tdt = E1000_READ_REG(&sc->hw, TDT);
1178 	sw_tdt = sc->next_avail_tx_desc;
1179 
1180 	while (hw_tdt != sw_tdt) {
1181 		tx_desc = &sc->tx_desc_base[hw_tdt];
1182 		length += tx_desc->lower.flags.length;
1183 		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1184 		if(++hw_tdt == sc->num_tx_desc)
1185 			hw_tdt = 0;
1186 
1187 		if(eop) {
1188 			if (em_82547_fifo_workaround(sc, length)) {
1189 				sc->tx_fifo_wrk++;
1190 				timeout_add(&sc->tx_fifo_timer_handle, 1);
1191 				splx(s);
1192 				return;
1193 			}
1194 			else {
1195 				E1000_WRITE_REG(&sc->hw, TDT, hw_tdt);
1196 				em_82547_update_fifo_head(sc, length);
1197 				length = 0;
1198 			}
1199 		}
1200 	}
1201 	splx(s);
1202 	return;
1203 }
1204 
1205 int
1206 em_82547_fifo_workaround(struct em_softc *sc, int len)
1207 {
1208 	int fifo_space, fifo_pkt_len;
1209 
1210 	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1211 
1212 	if (sc->link_duplex == HALF_DUPLEX) {
1213 		fifo_space = EM_82547_TX_FIFO_SIZE - sc->tx_fifo_head;
1214 
1215 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1216 			if (em_82547_tx_fifo_reset(sc)) {
1217 				return(0);
1218 			}
1219 			else {
1220 				return(1);
1221 			}
1222 		}
1223 	}
1224 
1225 	return(0);
1226 }
1227 
1228 void
1229 em_82547_update_fifo_head(struct em_softc *sc, int len)
1230 {
1231 	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1232 
1233 	/* tx_fifo_head is always 16 byte aligned */
1234 	sc->tx_fifo_head += fifo_pkt_len;
1235 	if (sc->tx_fifo_head >= EM_82547_TX_FIFO_SIZE) {
1236 		sc->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
1237 	}
1238 
1239 	return;
1240 }
1241 
1242 
/*
 * Reset the 82547 internal TX FIFO pointers, but only when the TX path
 * is completely idle (descriptor ring empty, FIFO head == tail, no
 * packets buffered).  Returns TRUE if the reset was performed, FALSE
 * if the FIFO was still busy.
 */
int
em_82547_tx_fifo_reset(struct em_softc *sc)
{
	uint32_t tctl;

	/* All four conditions together mean the TX FIFO is quiesced. */
	if ( (E1000_READ_REG(&sc->hw, TDT) ==
	      E1000_READ_REG(&sc->hw, TDH)) &&
	     (E1000_READ_REG(&sc->hw, TDFT) ==
	      E1000_READ_REG(&sc->hw, TDFH)) &&
	     (E1000_READ_REG(&sc->hw, TDFTS) ==
	      E1000_READ_REG(&sc->hw, TDFHS)) &&
	     (E1000_READ_REG(&sc->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&sc->hw, TCTL);
		E1000_WRITE_REG(&sc->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&sc->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
		E1000_WRITE_REG(&sc->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
		E1000_WRITE_REG(&sc->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
		E1000_WRITE_REG(&sc->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&sc->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&sc->hw);

		/* Keep the software FIFO shadow in sync with the hardware. */
		sc->tx_fifo_head = 0;
		sc->tx_fifo_reset++;

		return(TRUE);
	}
	else {
		return(FALSE);
	}
}
1279 
1280 void
1281 em_set_promisc(struct em_softc * sc)
1282 {
1283 
1284 	u_int32_t	reg_rctl;
1285 	struct ifnet   *ifp = &sc->interface_data.ac_if;
1286 
1287 	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1288 
1289 	if (ifp->if_flags & IFF_PROMISC) {
1290 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1291 		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1292 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1293 		reg_rctl |= E1000_RCTL_MPE;
1294 		reg_rctl &= ~E1000_RCTL_UPE;
1295 		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1296 	}
1297 
1298 	return;
1299 }
1300 
1301 void
1302 em_disable_promisc(struct em_softc * sc)
1303 {
1304 	u_int32_t	reg_rctl;
1305 
1306 	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1307 
1308 	reg_rctl &=  (~E1000_RCTL_UPE);
1309 	reg_rctl &=  (~E1000_RCTL_MPE);
1310 	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1311 
1312 	return;
1313 }
1314 
1315 
1316 /*********************************************************************
1317  *  Multicast Update
1318  *
1319  *  This routine is called whenever multicast address list is updated.
1320  *
1321  **********************************************************************/
1322 
1323 void
1324 em_set_multi(struct em_softc * sc)
1325 {
1326 	u_int32_t reg_rctl = 0;
1327 	u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1328 #ifdef __FreeBSD__
1329 	struct ifmultiaddr  *ifma;
1330 #endif
1331 	int mcnt = 0;
1332 #ifdef __FreeBSD__
1333 	struct ifnet   *ifp = &sc->interface_data.ac_if;
1334 #endif
1335 
1336 	IOCTL_DEBUGOUT("em_set_multi: begin");
1337 
1338 	if (sc->hw.mac_type == em_82542_rev2_0) {
1339 		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1340 		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1341 			em_pci_clear_mwi(&sc->hw);
1342 		}
1343 		reg_rctl |= E1000_RCTL_RST;
1344 		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1345 		msec_delay(5);
1346 	}
1347 
1348 #ifdef __FreeBSD__
1349 #if __FreeBSD_version < 500000
1350 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1351 #else
1352 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1353 #endif
1354 		if (ifma->ifma_addr->sa_family != AF_LINK)
1355 			continue;
1356 
1357 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;
1358 
1359 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1360 		      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1361 		mcnt++;
1362 	}
1363 #endif /* __FreeBSD__ */
1364 
1365 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1366 		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1367 		reg_rctl |= E1000_RCTL_MPE;
1368 		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1369 	} else
1370 		em_mc_addr_list_update(&sc->hw, mta, mcnt, 0);
1371 
1372 	if (sc->hw.mac_type == em_82542_rev2_0) {
1373 		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1374 		reg_rctl &= ~E1000_RCTL_RST;
1375 		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1376 		msec_delay(5);
1377 		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
1378 			em_pci_set_mwi(&sc->hw);
1379 		}
1380 	}
1381 
1382 	return;
1383 }
1384 
1385 
1386 /*********************************************************************
1387  *  Timer routine
1388  *
1389  *  This routine checks for link status and updates statistics.
1390  *
1391  **********************************************************************/
1392 
1393 void
1394 em_local_timer(void *arg)
1395 {
1396 	int s;
1397 	struct ifnet   *ifp;
1398 	struct em_softc * sc = arg;
1399 	ifp = &sc->interface_data.ac_if;
1400 
1401 	s = splimp();
1402 
1403 	em_check_for_link(&sc->hw);
1404 	em_print_link_status(sc);
1405 	em_update_stats_counters(sc);
1406 	if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING) {
1407 		em_print_hw_stats(sc);
1408 	}
1409 	em_smartspeed(sc);
1410 
1411 	timeout_add(&sc->timer_handle, 2*hz);
1412 
1413 	splx(s);
1414 	return;
1415 }
1416 
1417 void
1418 em_print_link_status(struct em_softc * sc)
1419 {
1420 	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
1421 		if (sc->link_active == 0) {
1422 			em_get_speed_and_duplex(&sc->hw,
1423 						&sc->link_speed,
1424 						&sc->link_duplex);
1425 			sc->link_active = 1;
1426 			sc->smartspeed = 0;
1427 		}
1428 	} else {
1429 		if (sc->link_active == 1) {
1430 			sc->link_speed = 0;
1431 			sc->link_duplex = 0;
1432 			sc->link_active = 0;
1433 		}
1434 	}
1435 
1436 	return;
1437 }
1438 
1439 /*********************************************************************
1440  *
1441  *  This routine disables all traffic on the sc by issuing a
1442  *  global reset on the MAC and deallocates TX/RX buffers.
1443  *
1444  **********************************************************************/
1445 
1446 void
1447 em_stop(void *arg)
1448 {
1449 	struct ifnet   *ifp;
1450 	struct em_softc * sc = arg;
1451 	ifp = &sc->interface_data.ac_if;
1452 
1453 	INIT_DEBUGOUT("em_stop: begin\n");
1454 	em_disable_intr(sc);
1455 	em_reset_hw(&sc->hw);
1456 	timeout_del(&sc->timer_handle);
1457 	timeout_del(&sc->tx_fifo_timer_handle);
1458 	em_free_transmit_structures(sc);
1459 	em_free_receive_structures(sc);
1460 
1461 
1462 	/* Tell the stack that the interface is no longer active */
1463 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1464 
1465 	return;
1466 }
1467 
1468 
1469 /*********************************************************************
1470  *
1471  *  Determine hardware revision.
1472  *
1473  **********************************************************************/
1474 void
1475 em_identify_hardware(struct em_softc * sc)
1476 {
1477 	u_int32_t reg;
1478 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1479 
1480 	/* Make sure our PCI config space has the necessary stuff set */
1481 	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
1482 					    PCI_COMMAND_STATUS_REG);
1483 	if (!((sc->hw.pci_cmd_word & PCI_COMMAND_MASTER_ENABLE) &&
1484 	      (sc->hw.pci_cmd_word & PCI_COMMAND_MEM_ENABLE))) {
1485 		printf("%s: Memory Access and/or Bus Master bits were not set!\n",
1486 		       sc->sc_dv.dv_xname);
1487 		sc->hw.pci_cmd_word |=
1488 		(PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE);
1489 		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1490 			       sc->hw.pci_cmd_word);
1491 	}
1492 
1493 	/* Save off the information about this board */
1494 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1495 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1496 
1497 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1498 	sc->hw.revision_id = PCI_REVISION(reg);
1499 
1500 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1501 
1502 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1503 	sc->hw.subsystem_id = PCI_PRODUCT(reg);
1504 
1505 	/* Identify the MAC */
1506 	if (em_set_mac_type(&sc->hw))
1507 		printf("%s: Unknown MAC Type\n", sc->sc_dv.dv_xname);
1508 
1509 	return;
1510 }
1511 
/*
 * Map the memory BAR (and, on 82544 and later, the I/O BAR), then map
 * and establish the interrupt.  Returns 0 on success or ENXIO on any
 * mapping failure.
 */
int
em_allocate_pci_resources(struct em_softc * sc)
{
	int		i, val, rid;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	/* The memory-mapped register BAR must really be a memory BAR. */
	val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba isn't memory");
		return (ENXIO);
	}
	if (pci_mapreg_map(pa, EM_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.em_membase, &sc->osdep.em_memsize, 0)) {
		printf(": can't find mem space");
		return (ENXIO);
	}

	if (sc->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ? */
		rid = EM_MMBA;
		/*
		 * Scan up to five BARs for one with the I/O-space bit set.
		 * NOTE(review): if none matches, rid keeps the last probed
		 * offset and the map below is attempted anyway -- verify
		 * all supported chips do expose an I/O BAR here.
		 */
		for (i = 0; i < 5; i++) {
			val = pci_conf_read(pa->pa_pc, pa->pa_tag, rid);
			if (val & 0x00000001) {
				sc->io_rid = rid;
				break;
			}
			rid += 4;
		}
		if (pci_mapreg_map(pa, rid, PCI_MAPREG_TYPE_IO, 0,
				   &sc->osdep.em_iobtag,
				   &sc->osdep.em_iobhandle,
				   &sc->osdep.em_iobase,
				   &sc->osdep.em_iosize, 0)) {
			printf(": can't find io space");
			return (ENXIO);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return (ENXIO);
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, em_intr, sc,
					      sc->sc_dv.dv_xname);
	if (sc->sc_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return (ENXIO);
	}
	printf(": %s", intrstr);

	/* Shared e1000 code reaches the OS-specific handles through hw.back. */
	sc->hw.back = &sc->osdep;

	return(0);
}
1575 
1576 void
1577 em_free_pci_resources(struct em_softc* sc)
1578 {
1579 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1580 	pci_chipset_tag_t	pc = pa->pa_pc;
1581 
1582 	if(sc->sc_intrhand)
1583 		pci_intr_disestablish(pc, sc->sc_intrhand);
1584 	sc->sc_intrhand = 0;
1585 
1586 	if(sc->osdep.em_iobase)
1587 		bus_space_unmap(sc->osdep.em_iobtag, sc->osdep.em_iobhandle,
1588 				sc->osdep.em_iosize);
1589 	sc->osdep.em_iobase = 0;
1590 
1591 	if(sc->osdep.em_membase)
1592 		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
1593 				sc->osdep.em_memsize);
1594 	sc->osdep.em_membase = 0;
1595 
1596 }
1597 
1598 /*********************************************************************
1599  *
1600  *  Initialize the hardware to a configuration as specified by the
1601  *  em_softc structure. The controller is reset, the EEPROM is
1602  *  verified, the MAC address is set, then the shared initialization
1603  *  routines are called.
1604  *
1605  **********************************************************************/
1606 int
1607 em_hardware_init(struct em_softc * sc)
1608 {
1609 	/* Issue a global reset */
1610 	em_reset_hw(&sc->hw);
1611 
1612 	/* When hardware is reset, fifo_head is also reset */
1613 	sc->tx_fifo_head = 0;
1614 
1615 	/* Make sure we have a good EEPROM before we read from it */
1616 	if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1617 		printf("%s: The EEPROM Checksum Is Not Valid\n",
1618 		       sc->sc_dv.dv_xname);
1619 		return(EIO);
1620 	}
1621 
1622 	if (em_read_part_num(&sc->hw, &(sc->part_num)) < 0) {
1623 		printf("%s: EEPROM read error while reading part number\n",
1624 		       sc->sc_dv.dv_xname);
1625 		return(EIO);
1626 	}
1627 
1628 	if (em_init_hw(&sc->hw) < 0) {
1629 		printf("%s: Hardware Initialization Failed",
1630 		       sc->sc_dv.dv_xname);
1631 		return(EIO);
1632 	}
1633 
1634 	em_check_for_link(&sc->hw);
1635 	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU)
1636 		sc->link_active = 1;
1637 	else
1638 		sc->link_active = 0;
1639 
1640 	if (sc->link_active) {
1641 		em_get_speed_and_duplex(&sc->hw,
1642 					&sc->link_speed,
1643 					&sc->link_duplex);
1644 	} else {
1645 		sc->link_speed = 0;
1646 		sc->link_duplex = 0;
1647 	}
1648 
1649 	return(0);
1650 }
1651 
1652 /*********************************************************************
1653  *
1654  *  Setup networking device structure and register an interface.
1655  *
1656  **********************************************************************/
1657 void
1658 em_setup_interface(struct em_softc * sc)
1659 {
1660 	struct ifnet   *ifp;
1661 	INIT_DEBUGOUT("em_setup_interface: begin");
1662 
1663 	ifp = &sc->interface_data.ac_if;
1664 	ifp->if_mtu = ETHERMTU;
1665 	ifp->if_output = ether_output;
1666 	ifp->if_baudrate = 1000000000;
1667 #ifdef __FreeBSD__
1668 	ifp->if_init =	em_init;
1669 #endif
1670 	ifp->if_softc = sc;
1671 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1672 	ifp->if_ioctl = em_ioctl;
1673 	ifp->if_start = em_start;
1674 	ifp->if_watchdog = em_watchdog;
1675 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1676 	IFQ_SET_READY(&ifp->if_snd);
1677 
1678 	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
1679 
1680 #ifdef __FreeBSD__
1681 	if (sc->hw.mac_type >= em_82543) {
1682 		ifp->if_capabilities = IFCAP_HWCSUM;
1683 		ifp->if_capenable = ifp->if_capabilities;
1684 	}
1685 
1686 	/*
1687 	 * Tell the upper layer(s) we support long frames.
1688 	 */
1689 	ifp->if_data.ifi_hdrlen = sizeof(struct ehter_vlan_header);
1690 #if __FreeBSD_version >= 500000
1691 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1692 #endif
1693 #endif /* __FreeBSD__ */
1694 
1695 
1696 	/*
1697 	 * Specify the media types supported by this adapter and register
1698 	 * callbacks to update media and link information
1699 	 */
1700 	ifmedia_init(&sc->media, IFM_IMASK, em_media_change,
1701 		     em_media_status);
1702 	if (sc->hw.media_type == em_media_type_fiber) {
1703 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1704 			    0, NULL);
1705 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX,
1706 			    0, NULL);
1707 	} else {
1708 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1709 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1710 			    0, NULL);
1711 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX,
1712 			    0, NULL);
1713 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1714 			    0, NULL);
1715 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1716 			    0, NULL);
1717 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1718 	}
1719 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1720 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1721 
1722 	if_attach(ifp);
1723 	ether_ifattach(ifp);
1724 
1725 	return;
1726 }
1727 
1728 
1729 /*********************************************************************
1730  *
1731  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1732  *
1733  **********************************************************************/
1734 void
1735 em_smartspeed(struct em_softc *sc)
1736 {
1737 	uint16_t phy_tmp;
1738 
1739 	if(sc->link_active || (sc->hw.phy_type != em_phy_igp) ||
1740 	   !sc->hw.autoneg || !(sc->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1741 		return;
1742 
1743 	if(sc->smartspeed == 0) {
1744 		/* If Master/Slave config fault is asserted twice,
1745 		 * we assume back-to-back */
1746 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1747 		if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
1748 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1749 		if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1750 			em_read_phy_reg(&sc->hw, PHY_1000T_CTRL,
1751 					&phy_tmp);
1752 			if(phy_tmp & CR_1000T_MS_ENABLE) {
1753 				phy_tmp &= ~CR_1000T_MS_ENABLE;
1754 				em_write_phy_reg(&sc->hw,
1755 						    PHY_1000T_CTRL, phy_tmp);
1756 				sc->smartspeed++;
1757 				if(sc->hw.autoneg &&
1758 				   !em_phy_setup_autoneg(&sc->hw) &&
1759 				   !em_read_phy_reg(&sc->hw, PHY_CTRL,
1760 						       &phy_tmp)) {
1761 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
1762 						    MII_CR_RESTART_AUTO_NEG);
1763 					em_write_phy_reg(&sc->hw,
1764 							 PHY_CTRL, phy_tmp);
1765 				}
1766 			}
1767 		}
1768 		return;
1769 	} else if(sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1770 		/* If still no link, perhaps using 2/3 pair cable */
1771 		em_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
1772 		phy_tmp |= CR_1000T_MS_ENABLE;
1773 		em_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
1774 		if(sc->hw.autoneg &&
1775 		   !em_phy_setup_autoneg(&sc->hw) &&
1776 		   !em_read_phy_reg(&sc->hw, PHY_CTRL, &phy_tmp)) {
1777 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
1778 				    MII_CR_RESTART_AUTO_NEG);
1779 			em_write_phy_reg(&sc->hw, PHY_CTRL, phy_tmp);
1780 		}
1781 	}
1782 	/* Restart process after EM_SMARTSPEED_MAX iterations */
1783 	if(sc->smartspeed++ == EM_SMARTSPEED_MAX)
1784 		sc->smartspeed = 0;
1785 
1786 	return;
1787 }
1788 
1789 
1790 /*
1791  * Manage DMA'able memory.
1792  */
1793 
#ifdef __FreeBSD__
/*
 * busdma load callback (FreeBSD only): stores the physical address of
 * the single mapped segment into the caller-supplied bus_addr_t.
 */
void
em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t*) arg = segs->ds_addr;
	return;
}
#endif /* __FreeBSD__ */
1804 
/*
 * Allocate, map and DMA-load a contiguous region of 'size' bytes,
 * filling in the em_dma_alloc handle.  Returns 0 on success or the
 * busdma error code on failure, unwinding partial work via the
 * goto-cleanup chain below.
 */
int
em_dma_malloc(struct em_softc *sc, bus_size_t size,
	struct em_dma_alloc *dma, int mapflags)
{
	int r;

	/*
	 * NOTE(review): the whole #ifdef __FreeBSD__ section is dead on
	 * OpenBSD and its 'r' is immediately overwritten by the OpenBSD
	 * bus_dmamap_create() below.
	 */
#ifdef __FreeBSD__
	r = bus_dma_tag_create(NULL,			/* parent */
			       PAGE_SIZE, 0,		/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       size,			/* maxsize */
			       1,			/* nsegments */
			       size,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       &dma->dma_tag);
	if (r != 0) {
		printf("%s: em_dma_malloc: bus_dma_tag_create failed; "
			"error %u\n", sc->sc_dv.dv_xname, r);
		goto fail_0;
	}

	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
#endif /* __FreeBSD__ */
	/* OpenBSD: the "tag" is simply the PCI DMA tag from attach. */
	dma->dma_tag = sc->osdep.em_pa.pa_dmat;
	r = bus_dmamap_create(dma->dma_tag, size, 1,
	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);

	if (r != 0) {
		printf("%s: em_dma_malloc: bus_dmamap_create failed; "
			"error %u\n", sc->sc_dv.dv_xname, r);
		goto fail_0;
	}

	/* Allocate one page-aligned physical segment... */
	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: em_dma_malloc: bus_dmammem_alloc failed; "
			"size %lu, error %d\n", sc->sc_dv.dv_xname,
			(unsigned long)size, r);
		goto fail_1;
	}

	/* ...map it into kernel virtual address space... */
	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: em_dma_malloc: bus_dmammem_map failed; "
			"size %lu, error %d\n", sc->sc_dv.dv_xname,
			(unsigned long)size, r);
		goto fail_2;
	}

	/* ...and load it so the device can see it. */
	r = bus_dmamap_load(sc->osdep.em_pa.pa_dmat, dma->dma_map,
			    dma->dma_vaddr,
			    size,
			    NULL,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("%s: em_dma_malloc: bus_dmamap_load failed; "
			"error %u\n", sc->sc_dv.dv_xname, r);
		goto fail_3;
	}

	dma->dma_size = size;
	return (0);

/* fail_4: */
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_3:
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
fail_2:
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
fail_1:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	/* bus_dma_tag_destroy(dma->dma_tag); */
fail_0:
	dma->dma_map = NULL;
	/* dma->dma_tag = NULL; */
	return (r);
}
1886 
/*
 * Release everything em_dma_malloc() set up, in the reverse order it
 * was acquired: unload, unmap, free the segment, destroy the map.
 */
void
em_dma_free(struct em_softc *sc, struct em_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	/* bus_dma_tag_destroy(dma->dma_tag); */
}
1896 
1897 
1898 /*********************************************************************
1899  *
1900  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1901  *  the information needed to transmit a packet on the wire.
1902  *
1903  **********************************************************************/
1904 int
1905 em_allocate_transmit_structures(struct em_softc * sc)
1906 {
1907 	if (!(sc->tx_buffer_area =
1908 	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
1909 					     sc->num_tx_desc, M_DEVBUF,
1910 					     M_NOWAIT))) {
1911 		printf("%s: Unable to allocate tx_buffer memory\n",
1912 		       sc->sc_dv.dv_xname);
1913 		return ENOMEM;
1914 	}
1915 
1916 	bzero(sc->tx_buffer_area,
1917 	      sizeof(struct em_buffer) * sc->num_tx_desc);
1918 
1919 	return 0;
1920 }
1921 
1922 /*********************************************************************
1923  *
1924  *  Allocate and initialize transmit structures.
1925  *
1926  **********************************************************************/
1927 int
1928 em_setup_transmit_structures(struct em_softc* sc)
1929 {
1930 #ifdef __FreeBSD__
1931 	/*
1932 	 * Setup DMA descriptor areas.
1933 	 */
1934 	if (bus_dma_tag_create(NULL,	/* parent */
1935 		    PAGE_SIZE, 0,	/* alignment, bounds */
1936 		    BUS_SPACE_MAXADDR,       /* lowaddr */
1937 		    BUS_SPACE_MAXADDR,       /* highaddr */
1938 		    NULL, NULL,              /* filter, filterarg */
1939 		    MCLBYTES * 8,            /* maxsize */
1940 		    EM_MAX_SCATTER,          /* nsegments */
1941 		    MCLBYTES * 8,            /* maxsegsize */
1942 		    BUS_DMA_ALLOCNOW,        /* flags */
1943 		    &sc->txtag)) {
1944 		printf("%s: Unable to allocate TX DMA tag\n", sc->sc_dv.dv_xname);
1945 		return (ENOMEM);
1946 	}
1947 
1948 #endif /* __FreeBSD__ */
1949 	sc->txtag = sc->osdep.em_pa.pa_dmat;
1950 
1951 	if (em_allocate_transmit_structures(sc))
1952 		return (ENOMEM);
1953 
1954 	bzero((void *) sc->tx_desc_base,
1955 	      (sizeof(struct em_tx_desc)) * sc->num_tx_desc);
1956 
1957 	sc->next_avail_tx_desc = 0;
1958 	sc->oldest_used_tx_desc = 0;
1959 
1960 	/* Set number of descriptors available */
1961 	sc->num_tx_desc_avail = sc->num_tx_desc;
1962 
1963 	/* Set checksum context */
1964 	sc->active_checksum_context = OFFLOAD_NONE;
1965 
1966 	return (0);
1967 }
1968 
1969 /*********************************************************************
1970  *
1971  *  Enable transmit unit.
1972  *
1973  **********************************************************************/
1974 void
1975 em_initialize_transmit_unit(struct em_softc * sc)
1976 {
1977 	u_int32_t	reg_tctl;
1978 	u_int32_t	reg_tipg = 0;
1979 	u_int64_t	bus_addr;
1980 
1981 	/* Setup the Base and Length of the Tx Descriptor Ring */
1982 	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
1983 	E1000_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
1984 	E1000_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
1985 	E1000_WRITE_REG(&sc->hw, TDLEN,
1986 			sc->num_tx_desc *
1987 			sizeof(struct em_tx_desc));
1988 
1989 	/* Setup the HW Tx Head and Tail descriptor pointers */
1990 	E1000_WRITE_REG(&sc->hw, TDH, 0);
1991 	E1000_WRITE_REG(&sc->hw, TDT, 0);
1992 
1993 
1994 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
1995 		     E1000_READ_REG(&sc->hw, TDBAL),
1996 		     E1000_READ_REG(&sc->hw, TDLEN));
1997 
1998 	/* Set the default values for the Tx Inter Packet Gap timer */
1999 	switch (sc->hw.mac_type) {
2000 	case em_82542_rev2_0:
2001 	case em_82542_rev2_1:
2002 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
2003 		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2004 		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2005 		break;
2006 	default:
2007 		if (sc->hw.media_type == em_media_type_fiber)
2008 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2009 		else
2010 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2011 			reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2012 			reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2013 	}
2014 
2015 	E1000_WRITE_REG(&sc->hw, TIPG, reg_tipg);
2016 	E1000_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
2017 	if(sc->hw.mac_type >= em_82540)
2018 		E1000_WRITE_REG(&sc->hw, TADV, sc->tx_abs_int_delay);
2019 
2020 	/* Program the Transmit Control Register */
2021 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2022 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2023 	if (sc->link_duplex == 1) {
2024 		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2025 	} else {
2026 		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2027 	}
2028 	E1000_WRITE_REG(&sc->hw, TCTL, reg_tctl);
2029 
2030 	/* Setup Transmit Descriptor Settings for this adapter */
2031 	sc->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
2032 
2033 	if (sc->tx_int_delay > 0)
2034 		sc->txd_cmd |= E1000_TXD_CMD_IDE;
2035 
2036 	return;
2037 }
2038 
2039 /*********************************************************************
2040  *
2041  *  Free all transmit related data structures.
2042  *
2043  **********************************************************************/
2044 void
2045 em_free_transmit_structures(struct em_softc* sc)
2046 {
2047 	struct em_buffer   *tx_buffer;
2048 	int		i;
2049 
2050 	INIT_DEBUGOUT("free_transmit_structures: begin");
2051 
2052 	if (sc->tx_buffer_area != NULL) {
2053 		tx_buffer = sc->tx_buffer_area;
2054 		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2055 			if (tx_buffer->m_head != NULL) {
2056 				bus_dmamap_unload(sc->txtag, tx_buffer->map);
2057 				bus_dmamap_destroy(sc->txtag, tx_buffer->map);
2058 				m_freem(tx_buffer->m_head);
2059 			}
2060 			tx_buffer->m_head = NULL;
2061 		}
2062 	}
2063 	if (sc->tx_buffer_area != NULL) {
2064 		free(sc->tx_buffer_area, M_DEVBUF);
2065 		sc->tx_buffer_area = NULL;
2066 	}
2067 	if (sc->txtag != NULL) {
2068 #ifdef __FreeBSD__
2069 		bus_dma_tag_destroy(sc->txtag);
2070 #endif
2071 		sc->txtag = NULL;
2072 	}
2073 	return;
2074 }
2075 
2076 /*********************************************************************
2077  *
2078  *  The offload context needs to be set when we transfer the first
2079  *  packet of a particular protocol (TCP/UDP). We change the
2080  *  context only if the protocol type changes.
2081  *
2082  **********************************************************************/
#ifdef __FreeBSD__
/*
 * Set up the hardware checksum-offload context for the next packet
 * (FreeBSD-only; this path is compiled out on OpenBSD).
 *
 * Returns, via txd_upper/txd_lower, the option bits the packet's data
 * descriptors must carry.  A new context descriptor is written into the
 * TX ring only when the protocol (TCP vs. UDP) differs from the
 * currently loaded context; otherwise the existing context is reused.
 */
void
em_transmit_checksum_setup(struct em_softc * sc,
			   struct mbuf *mp,
			   u_int32_t *txd_upper,
			   u_int32_t *txd_lower)
{
	struct em_context_desc *TXD;
	struct em_buffer *tx_buffer;
	int curr_txd;

	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
				return;		/* context already loaded */
			else
				sc->active_checksum_context = OFFLOAD_TCP_IP;

		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
				return;		/* context already loaded */
			else
				sc->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* No TCP/UDP offload requested for this packet. */
			*txd_upper = 0;
			*txd_lower = 0;
			return;
		}
	} else {
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	/* If we reach this point, the checksum offload context
	 * needs to be reset.
	 */
	curr_txd = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buffer_area[curr_txd];
	TXD = (struct em_context_desc *) &sc->tx_desc_base[curr_txd];

	/* IP header follows the ethernet header; point the hardware at
	 * the ip_sum field and the end of the (option-less) IP header. */
	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
	TXD->lower_setup.ip_fields.ipcso =
	ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
	TXD->lower_setup.ip_fields.ipcse =
	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

	/* Transport header offset assumes a fixed-size IP header (no IP
	 * options) — NOTE(review): confirm the stack guarantees this. */
	TXD->upper_setup.tcp_fields.tucss =
	ETHER_HDR_LEN + sizeof(struct ip);
	TXD->upper_setup.tcp_fields.tucse = htole16(0);

	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
		ETHER_HDR_LEN + sizeof(struct ip) +
		offsetof(struct tcphdr, th_sum);
	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
		ETHER_HDR_LEN + sizeof(struct ip) +
		offsetof(struct udphdr, uh_sum);
	}

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(sc->txd_cmd | E1000_TXD_CMD_DEXT);

	/* The context descriptor consumes a ring slot but carries no mbuf. */
	tx_buffer->m_head = NULL;

	if (++curr_txd == sc->num_tx_desc)
		curr_txd = 0;

	sc->num_tx_desc_avail--;
	sc->next_avail_tx_desc = curr_txd;

	return;
}
#endif /* __FreeBSD__ */
2163 
2164 /**********************************************************************
2165  *
2166  *  Examine each tx_buffer in the used queue. If the hardware is done
2167  *  processing the packet then free associated resources. The
2168  *  tx_buffer is put back on the free queue.
2169  *
2170  **********************************************************************/
void
em_clean_transmit_interrupts(struct em_softc* sc)
{
	int s;
	int i, num_avail;
	struct em_buffer *tx_buffer;
	struct em_tx_desc   *tx_desc;
	struct ifnet   *ifp = &sc->interface_data.ac_if;

	/* Whole ring already free: nothing to reclaim. */
	if (sc->num_tx_desc_avail == sc->num_tx_desc)
		return;

	s = splimp();	/* keep the interrupt path out while we walk the ring */
#ifdef DBG_STATS
	sc->clean_tx_interrupts++;
#endif
	num_avail = sc->num_tx_desc_avail;
	i = sc->oldest_used_tx_desc;

	tx_buffer = &sc->tx_buffer_area[i];
	tx_desc = &sc->tx_desc_base[i];

	/*
	 * Starting at the oldest in-use descriptor, reclaim every
	 * descriptor the hardware has marked done (DD set).
	 */
	while(tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

		tx_desc->upper.data = 0;	/* clear status for reuse */
		num_avail++;

		/* A slot with an mbuf marks the end of a packet: count
		 * it, tear down its DMA map and free the chain. */
		if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_sync(sc->txtag, tx_buffer->map,
			    0, tx_buffer->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->txtag, tx_buffer->map);
			bus_dmamap_destroy(sc->txtag, tx_buffer->map);

			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}

		/* Advance with wrap-around. */
		if (++i == sc->num_tx_desc)
			i = 0;

		tx_buffer = &sc->tx_buffer_area[i];
		tx_desc = &sc->tx_desc_base[i];
	}

	sc->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_OACTIVE to tell the stack
	 * that it is OK to send packets.
	 * If there are no pending descriptors, clear the timeout. Otherwise,
	 * if some descriptors have been freed, restart the timeout.
	 */
	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_flags &= ~IFF_OACTIVE;
		if (num_avail == sc->num_tx_desc)
			ifp->if_timer = 0;
		else if (num_avail == sc->num_tx_desc_avail)
			ifp->if_timer = EM_TX_TIMEOUT;
	}
	sc->num_tx_desc_avail = num_avail;
	splx(s);
	return;
}
2236 
2237 /*********************************************************************
2238  *
2239  *  Get a buffer from system mbuf buffer pool.
2240  *
2241  **********************************************************************/
int
em_get_buf(int i, struct em_softc *sc,
    struct mbuf *nmp)
{
	struct mbuf    *mp = nmp;
	struct em_buffer *rx_buffer;
	struct ifnet   *ifp;
	int error;

	ifp = &sc->interface_data.ac_if;

	if (mp == NULL) {
		/* No mbuf supplied: allocate a header mbuf + cluster. */
		MGETHDR(mp, M_DONTWAIT, MT_DATA);
		if (mp == NULL) {
			sc->mbuf_alloc_failed++;
			return(ENOBUFS);
		}
		MCLGET(mp, M_DONTWAIT);
		if ((mp->m_flags & M_EXT) == 0) {
			m_freem(mp);
			sc->mbuf_cluster_failed++;
			return(ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset length, data pointer
		 * and detach any chained mbufs. */
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* For standard MTU, shift the payload by ETHER_ALIGN so the IP
	 * header ends up naturally aligned. */
	if (ifp->if_mtu <= ETHERMTU) {
		m_adj(mp, ETHER_ALIGN);
	}

	rx_buffer = &sc->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load(sc->rxtag, rx_buffer->map,
	    mtod(mp, void *), mp->m_len, NULL,
	    0);
	if (error) {
		/* NOTE(review): m_free() releases only the first mbuf;
		 * mp is a single cluster mbuf here, so that suffices. */
		m_free(mp);
		return(error);
	}
	/* Publish the cluster's DMA address in descriptor slot i. */
	rx_buffer->m_head = mp;
	sc->rx_desc_base[i].buffer_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
	bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
	    rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return(0);
}
2296 
2297 /*********************************************************************
2298  *
2299  *  Allocate memory for rx_buffer structures. Since we use one
2300  *  rx_buffer per received packet, the maximum number of rx_buffer's
2301  *  that we'll need is equal to the number of receive descriptors
2302  *  that we've allocated.
2303  *
2304  **********************************************************************/
2305 int
2306 em_allocate_receive_structures(struct em_softc* sc)
2307 {
2308 	int		i, error;
2309 	struct em_buffer *rx_buffer;
2310 
2311 	if (!(sc->rx_buffer_area =
2312 	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2313 					     sc->num_rx_desc, M_DEVBUF,
2314 					     M_NOWAIT))) {
2315 		printf("%s: Unable to allocate rx_buffer memory\n",
2316 		       sc->sc_dv.dv_xname);
2317 		return(ENOMEM);
2318 	}
2319 
2320 	bzero(sc->rx_buffer_area,
2321 	      sizeof(struct em_buffer) * sc->num_rx_desc);
2322 
2323 #ifdef __FreeBSD__
2324 	error = bus_dma_tag_create(NULL,                /* parent */
2325 				PAGE_SIZE, 0,            /* alignment, bounds */
2326 				BUS_SPACE_MAXADDR,       /* lowaddr */
2327 				BUS_SPACE_MAXADDR,       /* highaddr */
2328 				NULL, NULL,              /* filter, filterarg */
2329 				MCLBYTES,                /* maxsize */
2330 				1,                       /* nsegments */
2331 				MCLBYTES,                /* maxsegsize */
2332 				BUS_DMA_ALLOCNOW,        /* flags */
2333 				&sc->rxtag);
2334 	if (error != 0) {
2335 		printf("%s: em_allocate_receive_structures: "
2336 			"bus_dma_tag_create failed; error %u\n",
2337 			sc->sc_dv.dv_xname, error);
2338 		goto fail_0;
2339 	}
2340 #endif /* __FreeBSD__ */
2341 	sc->rxtag = sc->osdep.em_pa.pa_dmat;
2342 
2343 	rx_buffer = sc->rx_buffer_area;
2344 	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2345 		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
2346 					MCLBYTES, 0, BUS_DMA_NOWAIT,
2347 					&rx_buffer->map);
2348 		if (error != 0) {
2349 			printf("%s: em_allocate_receive_structures: "
2350 			    "bus_dmamap_create failed; error %u\n",
2351 			    sc->sc_dv.dv_xname, error);
2352 			goto fail_1;
2353 		}
2354 	}
2355 
2356 	for (i = 0; i < sc->num_rx_desc; i++) {
2357 		error = em_get_buf(i, sc, NULL);
2358 		if (error != 0) {
2359 			sc->rx_buffer_area[i].m_head = NULL;
2360 			sc->rx_desc_base[i].buffer_addr = 0;
2361 			return(error);
2362                 }
2363         }
2364 
2365         return(0);
2366 
2367 fail_1:
2368 	/* bus_dma_tag_destroy(sc->rxtag); */
2369 /* fail_0: */
2370 	sc->rxtag = NULL;
2371 	free(sc->rx_buffer_area, M_DEVBUF);
2372 	sc->rx_buffer_area = NULL;
2373 	return (error);
2374 }
2375 
2376 /*********************************************************************
2377  *
2378  *  Allocate and initialize receive structures.
2379  *
2380  **********************************************************************/
2381 int
2382 em_setup_receive_structures(struct em_softc * sc)
2383 {
2384 	bzero((void *) sc->rx_desc_base,
2385 	    (sizeof(struct em_rx_desc)) * sc->num_rx_desc);
2386 
2387 	if (em_allocate_receive_structures(sc))
2388 		return ENOMEM;
2389 
2390 	/* Setup our descriptor pointers */
2391 	sc->next_rx_desc_to_check = 0;
2392 	return(0);
2393 }
2394 
2395 /*********************************************************************
2396  *
2397  *  Enable receive unit.
2398  *
2399  **********************************************************************/
void
em_initialize_receive_unit(struct em_softc * sc)
{
	u_int32_t	reg_rctl;
#ifdef __FreeBSD__
	u_int32_t	reg_rxcsum;
#endif
	struct ifnet	*ifp;
	u_int64_t	bus_addr;

	ifp = &sc->interface_data.ac_if;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&sc->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&sc->hw, RDTR,
			sc->rx_int_delay | E1000_RDT_FPDB);

	if(sc->hw.mac_type >= em_82540) {
		/* Absolute RX interrupt delay (82540 and newer only). */
		E1000_WRITE_REG(&sc->hw, RADV, sc->rx_abs_int_delay);

		/* Set the interrupt throttling rate.  Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR		1000000000/(MAX_INTS_PER_SEC * 256)
		E1000_WRITE_REG(&sc->hw, ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
	E1000_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&sc->hw, RDH, 0);
	E1000_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (sc->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI compatibility requires accepting bad packets (SBP). */
	if (sc->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	/* Buffer-size bits; sizes above 2048 need BSEX and long packets. */
	switch (sc->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096|E1000_RCTL_BSEX|E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192|E1000_RCTL_BSEX|E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384|E1000_RCTL_BSEX|E1000_RCTL_LPE;
		break;
	}

	/* Jumbo MTU also requires long-packet reception. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

#ifdef __FreeBSD__
	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((sc->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&sc->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
	}
#endif /* __FreeBSD__ */

	/* Enable Receives */
	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);

	return;
}
2483 
2484 /*********************************************************************
2485  *
2486  *  Free receive related data structures.
2487  *
2488  **********************************************************************/
2489 void
2490 em_free_receive_structures(struct em_softc * sc)
2491 {
2492 	struct em_buffer   *rx_buffer;
2493 	int		i;
2494 
2495 	INIT_DEBUGOUT("free_receive_structures: begin");
2496 
2497 	if (sc->rx_buffer_area != NULL) {
2498 		rx_buffer = sc->rx_buffer_area;
2499 		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2500 			if (rx_buffer->map != NULL) {
2501 				bus_dmamap_unload(sc->rxtag, rx_buffer->map);
2502 				bus_dmamap_destroy(sc->rxtag, rx_buffer->map);
2503 			}
2504 			if (rx_buffer->m_head != NULL)
2505 				m_freem(rx_buffer->m_head);
2506 			rx_buffer->m_head = NULL;
2507 		}
2508 	}
2509 	if (sc->rx_buffer_area != NULL) {
2510 		free(sc->rx_buffer_area, M_DEVBUF);
2511 		sc->rx_buffer_area = NULL;
2512 	}
2513 	if (sc->rxtag != NULL) {
2514 		/* bus_dma_tag_destroy(sc->rxtag); */
2515 		sc->rxtag = NULL;
2516 	}
2517 	return;
2518 }
2519 
2520 /*********************************************************************
2521  *
2522  *  This routine executes in interrupt context. It replenishes
2523  *  the mbufs in the descriptor and sends data which has been
2524  *  dma'ed into host memory to upper layer.
2525  *
2526  *  We loop at most count times if count is > 0, or until done if
2527  *  count < 0.
2528  *
2529  *********************************************************************/
void
em_process_receive_interrupts(struct em_softc* sc, int count)
{
	struct ifnet	    *ifp;
	struct mbuf	    *mp;
	struct ether_header *eh;
	u_int8_t	    accept_frame = 0;
	u_int8_t	    eop = 0;	/* end-of-packet seen on this desc */
	u_int16_t	    len, desc_len;
	int		    i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	ifp = &sc->interface_data.ac_if;
	i = sc->next_rx_desc_to_check;
	current_desc = &sc->rx_desc_base[i];

	/* Nothing delivered by the hardware yet. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
#ifdef DBG_STATS
		sc->no_pkts_avail++;
#endif
		return;
	}

	/* Each completed descriptor; count limits the number of whole
	 * packets processed (count < 0 means "until done"). */
	while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {

		/* Take the mbuf backing this slot and unmap it. */
		mp = sc->rx_buffer_area[i].m_head;
		bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
		    0, sc->rx_buffer_area[i].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);

		accept_frame = 1;
		desc_len = letoh16(current_desc->length);
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* Last fragment: strip the trailing CRC. */
			count--;
			eop = 1;
			len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* TBI (fiber) workaround: a frame ending in a
			 * carrier-extend error may still be acceptable. */
			u_int8_t last_byte;
			u_int32_t pkt_len = desc_len;

			if (sc->fmp != NULL)
				pkt_len += sc->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

			if (TBI_ACCEPT(&sc->hw, current_desc->status,
				       current_desc->errors,
				       pkt_len, last_byte)) {
				em_tbi_adjust_stats(&sc->hw,
						    &sc->stats,
						    pkt_len,
						    sc->hw.mac_addr);
				len--;	/* drop the carrier-extend byte */
			}
			else {
				accept_frame = 0;
			}
		}

		if (accept_frame) {

			/* Refill the slot; if that fails, recycle mp back
			 * into the slot and drop the packet in progress. */
			if (em_get_buf(i, sc, NULL) == ENOBUFS) {
				sc->dropped_pkts++;
				em_get_buf(i, sc, mp);
				if (sc->fmp != NULL)
					m_freem(sc->fmp);
				sc->fmp = NULL;
				sc->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (sc->fmp == NULL) {
				mp->m_pkthdr.len = len;
				sc->fmp = mp;	 /* Store the first mbuf */
				sc->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				sc->lmp->m_next = mp;
				sc->lmp = sc->lmp->m_next;
				sc->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Complete packet: hand it to the stack. */
				sc->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;

#if NBPFILTER > 0
				/*
				 * Handle BPF listeners. Let the BPF
				 * user see the packet.
				 */
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, sc->fmp);
#endif

				eh = mtod(sc->fmp, struct ether_header *);
				/* Remove ethernet header from mbuf */
				m_adj(sc->fmp, sizeof(struct ether_header));
				em_receive_checksum(sc, current_desc,
						sc->fmp);

#ifdef __FreeBSD__
				if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(eh, sc->fmp,
					    (letoh16(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
				else
#endif /* __FreeBSD__ */
					ether_input(ifp, eh, sc->fmp);

				sc->fmp = NULL;
				sc->lmp = NULL;
			}
		} else {
			/* Bad frame: recycle mp and drop any partial chain. */
			sc->dropped_pkts++;
			em_get_buf(i, sc, mp);
			if (sc->fmp != NULL)
				m_freem(sc->fmp);
			sc->fmp = NULL;
			sc->lmp = NULL;
		}

		/* Zero out the receive descriptors status  */
		current_desc->status = 0;

		/* Advance the E1000's Receive Queue #0	 "Tail Pointer". */
		E1000_WRITE_REG(&sc->hw, RDT, i);

		/* Advance our pointers to the next descriptor */
		if (++i == sc->num_rx_desc) {
			i = 0;
			current_desc = sc->rx_desc_base;
		} else
			current_desc++;
	}
	sc->next_rx_desc_to_check = i;
	return;
}
2680 
2681 /*********************************************************************
2682  *
2683  *  Verify that the hardware indicated that the checksum is valid.
2684  *  Inform the stack about the status of checksum so that stack
2685  *  doesn't spend time verifying the checksum.
2686  *
2687  *********************************************************************/
void
em_receive_checksum(struct em_softc *sc,
		    struct em_rx_desc *rx_desc,
		    struct mbuf *mp)
{
	/* On OpenBSD this function is an intentional no-op; the body
	 * below is only compiled on FreeBSD. */
#ifdef __FreeBSD__
	/* 82543 or newer only */
	if ((sc->hw.mac_type < em_82543) ||
	    /* Ignore Checksum bit is set */
	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;

		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}

	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |=
			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}

	return;
#endif /* __FreeBSD__ */
}
2726 
2727 
2728 void em_enable_vlans(struct em_softc * sc)
2729 {
2730 	uint32_t ctrl;
2731 
2732 	E1000_WRITE_REG(&sc->hw, VET, ETHERTYPE_8021Q);
2733 
2734 	ctrl = E1000_READ_REG(&sc->hw, CTRL);
2735 	ctrl |= E1000_CTRL_VME;
2736 	E1000_WRITE_REG(&sc->hw, CTRL, ctrl);
2737 
2738 	return;
2739 }
2740 
2741 void
2742 em_enable_intr(struct em_softc* sc)
2743 {
2744 	E1000_WRITE_REG(&sc->hw, IMS, (IMS_ENABLE_MASK));
2745 	return;
2746 }
2747 
2748 void
2749 em_disable_intr(struct em_softc *sc)
2750 {
2751 	E1000_WRITE_REG(&sc->hw, IMC,
2752 			(0xffffffff & ~E1000_IMC_RXSEQ));
2753 	return;
2754 }
2755 
2756 int
2757 em_is_valid_ether_addr(u_int8_t *addr)
2758 {
2759 	const char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2760 
2761 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
2762 		return (FALSE);
2763 	}
2764 
2765 	return(TRUE);
2766 }
2767 
/*
 * Write a 16-bit value into PCI configuration space on behalf of the
 * shared Intel hardware layer.
 *
 * NOTE(review): pci_conf_write() performs a 32-bit write, so the upper
 * 16 bits of the dword at `reg` are clobbered with zeros — the original
 * author flagged this too; confirm no caller depends on those bits.
 */
void
em_write_pci_cfg(struct em_hw *hw,
		      uint32_t reg,
		      uint16_t *value)
{
	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
	pci_chipset_tag_t pc = pa->pa_pc;
	/* Should we do read/mask/write...?  16 vs 32 bit!!! */
	pci_conf_write(pc, pa->pa_tag, reg, *value);
}
2778 
/*
 * Read a 16-bit value from PCI configuration space for the shared
 * Intel hardware layer.
 *
 * NOTE(review): pci_conf_read() returns 32 bits; the assignment keeps
 * only the low 16 — assumes `reg` is dword-aligned on the field of
 * interest.  Confirm callers only pass such offsets.
 */
void
em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
		     uint16_t *value)
{
	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
	pci_chipset_tag_t pc = pa->pa_pc;
	*value = pci_conf_read(pc, pa->pa_tag, reg);
	return;
}
2788 
2789 void
2790 em_pci_set_mwi(struct em_hw *hw)
2791 {
2792 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
2793 	pci_chipset_tag_t pc = pa->pa_pc;
2794 	/* Should we do read/mask/write...?  16 vs 32 bit!!! */
2795 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
2796 		(hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE));
2797 
2798 }
2799 
2800 void
2801 em_pci_clear_mwi(struct em_hw *hw)
2802 {
2803 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
2804 	pci_chipset_tag_t pc = pa->pa_pc;
2805 	/* Should we do read/mask/write...?  16 vs 32 bit!!! */
2806 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
2807 		(hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE));
2808 
2809 }
2810 
2811 uint32_t
2812 em_io_read(struct em_hw *hw, uint32_t port)
2813 {
2814 	return bus_space_read_4(((struct em_osdep *)(hw)->back)->em_iobtag,
2815 		((struct em_osdep *)(hw)->back)->em_iobhandle, port);
2816 }
2817 
2818 void
2819 em_io_write(struct em_hw *hw, uint32_t port, uint32_t value)
2820 {
2821 	bus_space_write_4(((struct em_osdep *)(hw)->back)->em_iobtag,
2822 			((struct em_osdep *)(hw)->back)->em_iobhandle, port,
2823 			value);
2824 	return;
2825 }
2826 
2827 /**********************************************************************
2828  *
2829  *  Update the board statistics counters.
2830  *
2831  **********************************************************************/
void
em_update_stats_counters(struct em_softc *sc)
{
	struct ifnet   *ifp;

	/* The hardware counters are clear-on-read; accumulate them into
	 * the driver's 64-bit software copies. */
	sc->stats.crcerrs += E1000_READ_REG(&sc->hw, CRCERRS);
	sc->stats.symerrs += E1000_READ_REG(&sc->hw, SYMERRS);
	sc->stats.mpc += E1000_READ_REG(&sc->hw, MPC);
	sc->stats.scc += E1000_READ_REG(&sc->hw, SCC);
	sc->stats.ecol += E1000_READ_REG(&sc->hw, ECOL);

	sc->stats.mcc += E1000_READ_REG(&sc->hw, MCC);
	sc->stats.latecol += E1000_READ_REG(&sc->hw, LATECOL);
	sc->stats.colc += E1000_READ_REG(&sc->hw, COLC);
	sc->stats.dc += E1000_READ_REG(&sc->hw, DC);
	sc->stats.sec += E1000_READ_REG(&sc->hw, SEC);
	sc->stats.rlec += E1000_READ_REG(&sc->hw, RLEC);
	sc->stats.xonrxc += E1000_READ_REG(&sc->hw, XONRXC);
	sc->stats.xontxc += E1000_READ_REG(&sc->hw, XONTXC);
	sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, XOFFRXC);
	sc->stats.xofftxc += E1000_READ_REG(&sc->hw, XOFFTXC);
	sc->stats.fcruc += E1000_READ_REG(&sc->hw, FCRUC);
	sc->stats.prc64 += E1000_READ_REG(&sc->hw, PRC64);
	sc->stats.prc127 += E1000_READ_REG(&sc->hw, PRC127);
	sc->stats.prc255 += E1000_READ_REG(&sc->hw, PRC255);
	sc->stats.prc511 += E1000_READ_REG(&sc->hw, PRC511);
	sc->stats.prc1023 += E1000_READ_REG(&sc->hw, PRC1023);
	sc->stats.prc1522 += E1000_READ_REG(&sc->hw, PRC1522);
	sc->stats.gprc += E1000_READ_REG(&sc->hw, GPRC);
	sc->stats.bprc += E1000_READ_REG(&sc->hw, BPRC);
	sc->stats.mprc += E1000_READ_REG(&sc->hw, MPRC);
	sc->stats.gptc += E1000_READ_REG(&sc->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	sc->stats.gorcl += E1000_READ_REG(&sc->hw, GORCL);
	sc->stats.gorch += E1000_READ_REG(&sc->hw, GORCH);
	sc->stats.gotcl += E1000_READ_REG(&sc->hw, GOTCL);
	sc->stats.gotch += E1000_READ_REG(&sc->hw, GOTCH);

	sc->stats.rnbc += E1000_READ_REG(&sc->hw, RNBC);
	sc->stats.ruc += E1000_READ_REG(&sc->hw, RUC);
	sc->stats.rfc += E1000_READ_REG(&sc->hw, RFC);
	sc->stats.roc += E1000_READ_REG(&sc->hw, ROC);
	sc->stats.rjc += E1000_READ_REG(&sc->hw, RJC);

	sc->stats.torl += E1000_READ_REG(&sc->hw, TORL);
	sc->stats.torh += E1000_READ_REG(&sc->hw, TORH);
	sc->stats.totl += E1000_READ_REG(&sc->hw, TOTL);
	sc->stats.toth += E1000_READ_REG(&sc->hw, TOTH);

	sc->stats.tpr += E1000_READ_REG(&sc->hw, TPR);
	sc->stats.tpt += E1000_READ_REG(&sc->hw, TPT);
	sc->stats.ptc64 += E1000_READ_REG(&sc->hw, PTC64);
	sc->stats.ptc127 += E1000_READ_REG(&sc->hw, PTC127);
	sc->stats.ptc255 += E1000_READ_REG(&sc->hw, PTC255);
	sc->stats.ptc511 += E1000_READ_REG(&sc->hw, PTC511);
	sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, PTC1023);
	sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, PTC1522);
	sc->stats.mptc += E1000_READ_REG(&sc->hw, MPTC);
	sc->stats.bptc += E1000_READ_REG(&sc->hw, BPTC);

	/* Counters that only exist on 82543 and newer parts. */
	if (sc->hw.mac_type >= em_82543) {
		sc->stats.algnerrc +=
		E1000_READ_REG(&sc->hw, ALGNERRC);
		sc->stats.rxerrc +=
		E1000_READ_REG(&sc->hw, RXERRC);
		sc->stats.tncrs +=
		E1000_READ_REG(&sc->hw, TNCRS);
		sc->stats.cexterr +=
		E1000_READ_REG(&sc->hw, CEXTERR);
		sc->stats.tsctc +=
		E1000_READ_REG(&sc->hw, TSCTC);
		sc->stats.tsctfc +=
		E1000_READ_REG(&sc->hw, TSCTFC);
	}
	ifp = &sc->interface_data.ac_if;

	/* Fill out the OS statistics structure */
	ifp->if_ibytes = sc->stats.gorcl;
	ifp->if_obytes = sc->stats.gotcl;
	ifp->if_imcasts = sc->stats.mprc;
	ifp->if_collisions = sc->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	sc->dropped_pkts +
	sc->stats.rxerrc +
	sc->stats.crcerrs +
	sc->stats.algnerrc +
	sc->stats.rlec + sc->stats.rnbc +
	sc->stats.mpc + sc->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol;

}
2930 
2931 
2932 /**********************************************************************
2933  *
2934  *  This routine is called only when em_display_debug_stats is enabled.
2935  *  This routine provides a way to take a look at important statistics
2936  *  maintained by the driver and hardware.
2937  *
2938  **********************************************************************/
void
em_print_debug_info(struct em_softc *sc)
{
	/* Prefix every line with the interface name (e.g. "em0"). */
	const char * const unit = sc->sc_dv.dv_xname;

#ifdef DBG_STATS
	printf("%s: Packets not Avail = %ld\n", unit,
	       sc->no_pkts_avail);
	printf("%s: CleanTxInterrupts = %ld\n", unit,
	       sc->clean_tx_interrupts);
#endif
	printf("%s: fifo workaround = %lld, fifo_reset = %lld\n", unit,
		(long long)sc->tx_fifo_wrk,
		(long long)sc->tx_fifo_reset);
	/* Current hardware TX head/tail pointers, read live. */
	printf("%s: hw tdh = %d, hw tdt = %d\n", unit,
		E1000_READ_REG(&sc->hw, TDH),
		E1000_READ_REG(&sc->hw, TDT));
	printf("%s: Num Tx Descriptors avail = %ld\n", unit,
	       sc->num_tx_desc_avail);
	printf("%s: Tx Descriptors not avail1 = %ld\n", unit,
	       sc->no_tx_desc_avail1);
	printf("%s: Tx Descriptors not avail2 = %ld\n", unit,
	       sc->no_tx_desc_avail2);
	printf("%s: Std mbuf failed = %ld\n", unit,
		sc->mbuf_alloc_failed);
	printf("%s: Std mbuf cluster failed = %ld\n", unit,
		sc->mbuf_cluster_failed);
	printf("%s: Driver dropped packets = %ld\n", unit,
	       sc->dropped_pkts);

	return;
}
2971 
void
em_print_hw_stats(struct em_softc *sc)
{
	/* Prefix every line with the interface name (e.g. "em0"). */
	const char * const unit = sc->sc_dv.dv_xname;

	/* Values come from the software-accumulated copies maintained
	 * by em_update_stats_counters(). */
	printf("%s: Excessive collisions = %lld\n", unit,
		(long long)sc->stats.ecol);
	printf("%s: Symbol errors = %lld\n", unit,
	       (long long)sc->stats.symerrs);
	printf("%s: Sequence errors = %lld\n", unit,
	       (long long)sc->stats.sec);
	printf("%s: Defer count = %lld\n", unit,
	       (long long)sc->stats.dc);

	printf("%s: Missed Packets = %lld\n", unit,
	       (long long)sc->stats.mpc);
	printf("%s: Receive No Buffers = %lld\n", unit,
	       (long long)sc->stats.rnbc);
	printf("%s: Receive length errors = %lld\n", unit,
	       (long long)sc->stats.rlec);
	printf("%s: Receive errors = %lld\n", unit,
	       (long long)sc->stats.rxerrc);
	printf("%s: Crc errors = %lld\n", unit,
	       (long long)sc->stats.crcerrs);
	printf("%s: Alignment errors = %lld\n", unit,
	       (long long)sc->stats.algnerrc);
	printf("%s: Carrier extension errors = %lld\n", unit,
	       (long long)sc->stats.cexterr);

	printf("%s: XON Rcvd = %lld\n", unit,
	       (long long)sc->stats.xonrxc);
	printf("%s: XON Xmtd = %lld\n", unit,
	       (long long)sc->stats.xontxc);
	printf("%s: XOFF Rcvd = %lld\n", unit,
	       (long long)sc->stats.xoffrxc);
	printf("%s: XOFF Xmtd = %lld\n", unit,
	       (long long)sc->stats.xofftxc);

	printf("%s: Good Packets Rcvd = %lld\n", unit,
	       (long long)sc->stats.gprc);
	printf("%s: Good Packets Xmtd = %lld\n", unit,
	       (long long)sc->stats.gptc);

	return;
}
3017 
3018 #ifdef __FreeBSD__
3019 int
3020 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3021 {
3022 	int error;
3023 	int result;
3024 	struct em_softc *sc;
3025 
3026 	result = -1;
3027 	error = sysctl_handle_int(oidp, &result, 0, req);
3028 
3029 	if (error || !req->newptr)
3030 		return (error);
3031 
3032 	if (result == 1) {
3033 		sc = (struct em_softc *)arg1;
3034 		em_print_debug_info(sc);
3035 	}
3036 
3037 	return error;
3038 }
3039 
3040 
3041 int
3042 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3043 {
3044 	int error;
3045 	int result;
3046 	struct em_softc *sc;
3047 
3048 	result = -1;
3049 	error = sysctl_handle_int(oidp, &result, 0, req);
3050 
3051 	if (error || !req->newptr)
3052 		return (error);
3053 
3054 	if (result == 1) {
3055 		sc = (struct em_softc *)arg1;
3056 		em_print_hw_stats(sc);
3057 	}
3058 
3059 	return error;
3060 }
3061 #endif /* __FreeBSD__ */
3062 
3063