xref: /openbsd-src/sys/dev/pci/if_em.c (revision 8500990981f885cbe5e6a4958549cacc238b5ae6)
1 /**************************************************************************
2 
3 Copyright (c) 2001-2003, Intel Corporation
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15 
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19 
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31 
32 ***************************************************************************/
33 
34 /*$FreeBSD: if_em.c,v 1.26 2003/06/05 17:51:37 pdeuskar Exp $*/
35 /* $OpenBSD: if_em.c,v 1.13 2003/10/13 21:19:29 jason Exp $ */
36 
37 #include "bpfilter.h"
38 #include "vlan.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/sockio.h>
43 #include <sys/mbuf.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/device.h>
47 #include <sys/socket.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 
53 #ifdef INET
54 #include <netinet/in.h>
55 #include <netinet/in_systm.h>
56 #include <netinet/in_var.h>
57 #include <netinet/ip.h>
58 #include <netinet/if_ether.h>
59 #endif
60 
61 #if NVLAN > 0
62 #include <net/if_types.h>
63 #include <net/if_vlan_var.h>
64 #endif
65 
66 #if NBPFILTER > 0
67 #include <net/bpf.h>
68 #endif
69 
70 #include <uvm/uvm_extern.h>
71 
72 #include <dev/pci/pcireg.h>
73 #include <dev/pci/pcivar.h>
74 #include <dev/pci/pcidevs.h>
75 
76 #include <dev/pci/if_em.h>
77 
/*
 * EM_KASSERT(exp, msg): panic with `msg' (a parenthesized panic argument
 * list) when `exp' is false.  Compiles to nothing unless DEBUG is defined.
 */
#ifdef DEBUG
#define EM_KASSERT(exp,msg)        do { if (!(exp)) panic msg; } while (0)
#else
#define EM_KASSERT(exp,msg)
#endif
83 
84 /*********************************************************************
85  *  Set this to one to display debug statistics
86  *********************************************************************/
87 int             em_display_debug_stats = 0;
88 
89 /*********************************************************************
90  *  Linked list of board private structures for all NICs found
91  *********************************************************************/
92 
93 struct em_softc *em_adapter_list = NULL;
94 
95 
96 /*********************************************************************
97  *  Driver version
98  *********************************************************************/
99 
100 char em_driver_version[] = "1.6.6";
101 
102 
103 /*********************************************************************
104  *  PCI Device ID Table
105  *********************************************************************/
/*
 * PCI vendor/product IDs this driver claims; scanned by em_probe() via
 * pci_matchbyid().  All entries are Intel 8254x-family gigabit parts.
 */
const struct pci_matchid em_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_SC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_SC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_SC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_SC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP }
};
128 
129 /*********************************************************************
130  *  Function prototypes
131  *********************************************************************/
132 int  em_probe(struct device *, void *, void *);
133 void em_attach(struct device *, struct device *, void *);
134 
135 #if 0
136 int  em_detach(void *);
137 int  em_shutdown(void *);
138 #endif
139 int  em_intr(void *);
140 void em_start(struct ifnet *);
141 int  em_ioctl(struct ifnet *, u_long, caddr_t);
142 void em_watchdog(struct ifnet *);
143 void em_init(void *);
144 void em_stop(void *);
145 void em_media_status(struct ifnet *, struct ifmediareq *);
146 int  em_media_change(struct ifnet *);
147 void em_identify_hardware(struct em_softc *);
148 int  em_allocate_pci_resources(struct em_softc *);
149 void em_free_pci_resources(struct em_softc *);
150 void em_local_timer(void *);
151 int  em_hardware_init(struct em_softc *);
152 void em_setup_interface(struct em_softc *);
153 int  em_setup_transmit_structures(struct em_softc *);
154 void em_initialize_transmit_unit(struct em_softc *);
155 int  em_setup_receive_structures(struct em_softc *);
156 void em_initialize_receive_unit(struct em_softc *);
157 void em_enable_intr(struct em_softc *);
158 void em_disable_intr(struct em_softc *);
159 void em_free_transmit_structures(struct em_softc *);
160 void em_free_receive_structures(struct em_softc *);
161 void em_update_stats_counters(struct em_softc *);
162 void em_clean_transmit_interrupts(struct em_softc *);
163 int  em_allocate_receive_structures(struct em_softc *);
164 int  em_allocate_transmit_structures(struct em_softc *);
165 void em_process_receive_interrupts(struct em_softc *, int);
166 void em_receive_checksum(struct em_softc *,
167 				     struct em_rx_desc *,
168 				     struct mbuf *);
169 void em_transmit_checksum_setup(struct em_softc *,
170 					    struct mbuf *,
171 					    u_int32_t *,
172 					    u_int32_t *);
173 void em_set_promisc(struct em_softc *);
174 void em_disable_promisc(struct em_softc *);
175 void em_set_multi(struct em_softc *);
176 void em_print_hw_stats(struct em_softc *);
177 void em_print_link_status(struct em_softc *);
178 int  em_get_buf(int i, struct em_softc *,
179 			    struct mbuf *);
180 void em_enable_vlans(struct em_softc *);
181 int  em_encap(struct em_softc *, struct mbuf *);
182 void em_smartspeed(struct em_softc *);
183 int  em_82547_fifo_workaround(struct em_softc *, int);
184 void em_82547_update_fifo_head(struct em_softc *, int);
185 int  em_82547_tx_fifo_reset(struct em_softc *);
186 void em_82547_move_tail(void *);
187 int  em_dma_malloc(struct em_softc *, bus_size_t,
188     struct em_dma_alloc *, int);
189 void em_dma_free(struct em_softc *, struct em_dma_alloc *);
190 void em_print_debug_info(struct em_softc *);
191 int  em_is_valid_ether_addr(u_int8_t *);
192 
193 /*********************************************************************
194  *  FreeBSD Device Interface Entry Points
195  *********************************************************************/
196 
/* Autoconf glue: match/attach entry points and softc size. */
struct cfattach em_ca = {
	sizeof(struct em_softc), em_probe, em_attach
};

/* Driver class: "em" network interface devices. */
struct cfdriver em_cd = {
	0, "em", DV_IFNET
};
204 
205 /*********************************************************************
206  *  Device identification routine
207  *
208  *  em_probe determines if the driver should be loaded on
209  *  adapter based on PCI vendor/device id of the adapter.
210  *
211  *  return 0 on success, positive on failure
212  *********************************************************************/
213 
214 int
215 em_probe(struct device *parent, void *match, void *aux)
216 {
217 	INIT_DEBUGOUT("em_probe: begin");
218 
219 	return (pci_matchbyid((struct pci_attach_args *)aux, em_devices,
220 	    sizeof(em_devices)/sizeof(em_devices[0])));
221 }
222 
223 /*********************************************************************
224  *  Device initialization routine
225  *
226  *  The attach entry point is called when the driver is being loaded.
227  *  This routine identifies the type of hardware, allocates all resources
228  *  and initializes the hardware.
229  *
230  *  return 0 on success, positive on failure
231  *********************************************************************/
232 
void
em_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
#if 0
	pci_chipset_tag_t pc = pa->pa_pc;
#endif
	struct em_softc *sc = (struct em_softc *)self;
	int		s;
	int		tsize, rsize;
	int		error = 0;

	INIT_DEBUGOUT("em_attach: begin");
	/* Block network interrupts for the whole attach sequence. */
	s = splimp();

#ifdef __FreeBSD__
	/* Allocate, clear, and link in our sc structure */
	if (!(sc = device_get_softc(dev))) {
		printf("em: sc structure allocation failed\n");
		splx(s);
		return(ENOMEM);
	}
	bzero(sc, sizeof(struct em_softc ));
	sc->dev = dev;
	sc->osdep.dev = dev;
	sc->sc_dv.dv_xname = device_get_unit(dev);
#endif /* __FreeBSD__ */

	sc->osdep.em_pa = *pa;

	/* Insert at the head of the global adapter list. */
	if (em_adapter_list != NULL)
		em_adapter_list->prev = sc;
	sc->next = em_adapter_list;
	em_adapter_list = sc;

#ifdef __FreeBSD__
	/* SYSCTL stuff */
	sysctl_ctx_init(&sc->sysctl_ctx);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
					       SYSCTL_STATIC_CHILDREN(_hw),
					       OID_AUTO,
					       device_get_nameunit(dev),
					       CTLFLAG_RD,
					       0, "");
	if (sc->sysctl_tree == NULL) {
		error = EIO;
		goto err_sysctl;
	}

	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
			SYSCTL_CHILDREN(sc->sysctl_tree),
			OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
			(void *)sc, 0,
			em_sysctl_debug_info, "I", "Debug Information");

	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
			SYSCTL_CHILDREN(sc->sysctl_tree),
			OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
			(void *)sc, 0,
			em_sysctl_stats, "I", "Statistics");

	callout_handle_init(&sc->timer_handle);
	callout_handle_init(&sc->tx_fifo_timer_handle);
#endif /* __FreeBSD__ */

	/* Periodic link/stats timer and 82547 TX FIFO workaround timer. */
	timeout_set(&sc->timer_handle, em_local_timer, sc);
	timeout_set(&sc->tx_fifo_timer_handle, em_82547_move_tail, sc);

	/* Determine hardware revision */
	em_identify_hardware(sc);

	/* Parameters (to be read from user) */
	sc->num_tx_desc = EM_MAX_TXD;
	sc->num_rx_desc = EM_MAX_RXD;
	sc->tx_int_delay = EM_TIDV;
	sc->tx_abs_int_delay = EM_TADV;
	sc->rx_int_delay = EM_RDTR;
	sc->rx_abs_int_delay = EM_RADV;
	sc->hw.autoneg = DO_AUTO_NEG;
	sc->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	sc->hw.tbi_compatibility_en = TRUE;
	sc->rx_buffer_len = EM_RXBUFFER_2048;

	/*
	 * These parameters control the automatic generation(Tx) and
	 * response(Rx) to Ethernet PAUSE frames.
	 */
	sc->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
	sc->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
	sc->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
	sc->hw.fc_send_xon   = TRUE;
	sc->hw.fc = em_fc_full;

	sc->hw.phy_init_script = 1;

	/*
	 * Set the max frame size assuming standard ethernet
	 * sized frames
	 */
	sc->hw.max_frame_size =
	    ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	sc->hw.min_frame_size =
	    MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	sc->hw.report_tx_early = 1;


	if (em_allocate_pci_resources(sc)) {
		printf("%s: Allocation of PCI resources failed\n",
		       sc->sc_dv.dv_xname);
		error = ENXIO;
		goto err_pci;
	}


	/* Initialize eeprom parameters */
	em_init_eeprom_params(&sc->hw);

	/* Descriptor rings must be page-multiple sized. */
	tsize = EM_ROUNDUP(sc->num_tx_desc *
			   sizeof(struct em_tx_desc), 4096);

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate tx_desc memory\n",
		       sc->sc_dv.dv_xname);
		error = ENOMEM;
		goto err_tx_desc;
	}
	sc->tx_desc_base = (struct em_tx_desc *)sc->txdma.dma_vaddr;

	rsize = EM_ROUNDUP(sc->num_rx_desc *
			   sizeof(struct em_rx_desc), 4096);

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		       sc->sc_dv.dv_xname);
		error = ENOMEM;
		goto err_rx_desc;
	}
	sc->rx_desc_base = (struct em_rx_desc *) sc->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (em_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		error = EIO;
		goto err_hw_init;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&sc->hw) < 0) {
		printf("%s: EEPROM read error while reading mac address\n",
		       sc->sc_dv.dv_xname);
		error = EIO;
		goto err_mac_addr;
	}

	if (!em_is_valid_ether_addr(sc->hw.mac_addr)) {
		printf("%s: Invalid mac address\n", sc->sc_dv.dv_xname);
		error = EIO;
		goto err_mac_addr;
	}

	bcopy(sc->hw.mac_addr, sc->interface_data.ac_enaddr,
	      ETHER_ADDR_LEN);

	printf(", address: %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

	/* Setup OS specific network interface */
	em_setup_interface(sc);

	/* Initialize statistics */
	em_clear_hw_cntrs(&sc->hw);
	em_update_stats_counters(sc);
	sc->hw.get_link_status = 1;
	em_check_for_link(&sc->hw);

	/* Print the link status */
	if (sc->link_active == 1) {
		em_get_speed_and_duplex(&sc->hw, &sc->link_speed,
					&sc->link_duplex);
	}

	INIT_DEBUGOUT("em_attach: end");
	splx(s);
	return;

	/* Unwind in reverse order of acquisition; labels fall through. */
err_mac_addr:
err_hw_init:
	em_dma_free(sc, &sc->rxdma);
err_rx_desc:
	em_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
	em_free_pci_resources(sc);
#ifdef __FreeBSD__
	sysctl_ctx_free(&sc->sysctl_ctx);
#endif /* __FreeBSD__ */
/*err_sysctl:*/
	splx(s);
	return;

}
443 
444 /*********************************************************************
445  *  Device removal routine
446  *
447  *  The detach entry point is called when the driver is being removed.
448  *  This routine stops the adapter and deallocates all the resources
449  *  that were allocated for driver operation.
450  *
451  *  return 0 on success, positive on failure
452  *********************************************************************/
#ifdef __FreeBSD__
/*
 * FreeBSD-only detach: stop the adapter, reset the PHY, detach the
 * ethernet layer, release DMA rings and PCI resources, and unlink the
 * softc from the global adapter list.  Not compiled on OpenBSD.
 */
int
em_detach(void* arg)
{
	struct em_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	int		s;

	INIT_DEBUGOUT("em_detach: begin");
	s = splimp();

	em_stop(sc);
	em_phy_hw_reset(&sc->hw);
#if __FreeBSD_version < 500000
	ether_ifdetach(&sc->interface_data.ac_if, ETHER_BPF_SUPPORTED);
#else
	ether_ifdetach(&sc->interface_data.ac_if);
#endif
	em_free_pci_resources(sc);

	/* Free Transmit Descriptor ring */
	if (sc->tx_desc_base) {
		em_dma_free(sc, &sc->txdma);
		sc->tx_desc_base = NULL;
	}

	/* Free Receive Descriptor ring */
	if (sc->rx_desc_base) {
		em_dma_free(sc, &sc->rxdma);
		sc->rx_desc_base = NULL;
	}

	/* Remove from the adapter list */
	if (em_adapter_list == sc)
		em_adapter_list = sc->next;
	if (sc->next != NULL)
		sc->next->prev = sc->prev;
	if (sc->prev != NULL)
		sc->prev->next = sc->next;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	splx(s);
	return(0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

/* System-shutdown hook: quiesce the adapter so it stops DMA. */
int
em_shutdown(void* arg)
{
	struct em_softc *sc = arg;
	em_stop(sc);
	return(0);
}

#endif /* __FreeBSD__ */
515 
516 /*********************************************************************
517  *  Transmit entry point
518  *
519  *  em_start is called by the stack to initiate a transmit.
520  *  The driver will remain in this routine as long as there are
521  *  packets to transmit and transmit resources are available.
522  *  In case resources are not available stack is notified and
523  *  the packet is requeued.
524  **********************************************************************/
525 
void
em_start(struct ifnet *ifp)
{
	int		s;
	struct mbuf    *m_head;
	struct em_softc *sc = ifp->if_softc;

	/* Nothing can be sent until the link is up. */
	if (!sc->link_active)
		return;

	s = splimp();

	for (;;) {
		/*
		 * Peek (not dequeue) so the packet stays queued if
		 * em_encap() cannot find resources for it.
		 */
		IFQ_POLL(&ifp->if_snd, m_head);

		if (m_head == NULL) break;

		if (em_encap(sc, m_head)) {
			/* Out of descriptors: stall until TX cleanup. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Committed to hardware; now actually remove it. */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
		/* Send a copy of the frame to the BPF listener */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head);
#endif

		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = EM_TX_TIMEOUT;

	}
	splx(s);
	return;
}
563 
564 /*********************************************************************
565  *  Ioctl entry point
566  *
567  *  em_ioctl is called when the user wants to configure the
568  *  interface.
569  *
570  *  return 0 on success, positive on failure
571  **********************************************************************/
572 
int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int		s, error = 0;
	struct ifreq   *ifr = (struct ifreq *) data;
	struct ifaddr  *ifa = (struct ifaddr *)data;
	struct em_softc * sc = ifp->if_softc;

	s = splimp();

	/* Let the common ethernet layer handle what it can first. */
	if ((error = ether_ioctl(ifp, &sc->interface_data, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (command) {
	case SIOCSIFADDR:
#ifdef __FreeBSD__
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
#endif /* __FreeBSD__ */
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
			       "Addr)");
		/* Bringing an address up implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		em_init(sc);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(&sc->interface_data, ifa);
			break;
#endif /* INET */
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
			sc->hw.max_frame_size =
			ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			/* Reinitialize so the hardware picks up the new size. */
			em_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING))
				em_init(sc);

			/* Re-sync promiscuous state with if_flags. */
			em_disable_promisc(sc);
			em_set_promisc(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				em_stop(sc);
			}
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		error = (command == SIOCADDMULTI)
			? ether_addmulti(ifr, &sc->interface_data)
			: ether_delmulti(ifr, &sc->interface_data);

		if (error == ENETRESET) {
			/* ENETRESET means the filter must be reprogrammed. */
			if (ifp->if_flags & IFF_RUNNING) {
				em_disable_intr(sc);
				em_set_multi(sc);
				if (sc->hw.mac_type == em_82542_rev2_0) {
					em_initialize_receive_unit(sc);
				}
#ifdef DEVICE_POLLING
				if (!(ifp->if_ipending & IFF_POLLING))
#endif
					em_enable_intr(sc);
			}
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
#ifdef __FreeBSD__
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_flags & IFF_RUNNING)
				em_init(sc);
		}
		break;
#endif /* __FreeBSD__ */
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%d)\n", (int)command);
		error = EINVAL;
	}

	splx(s);
	return(error);
}
684 
685 /*********************************************************************
686  *  Watchdog entry point
687  *
688  *  This routine is called whenever hardware quits transmitting.
689  *
690  **********************************************************************/
691 
692 void
693 em_watchdog(struct ifnet *ifp)
694 {
695 	struct em_softc * sc;
696 	sc = ifp->if_softc;
697 
698 	/* If we are in this routine because of pause frames, then
699 	 * don't reset the hardware.
700 	 */
701 	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_TXOFF) {
702 		ifp->if_timer = EM_TX_TIMEOUT;
703 		return;
704 	}
705 
706 	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);
707 
708 	ifp->if_flags &= ~IFF_RUNNING;
709 
710 	em_stop(sc);
711 	em_init(sc);
712 
713 	ifp->if_oerrors++;
714 	return;
715 }
716 
717 /*********************************************************************
718  *  Init entry point
719  *
720  *  This routine is used in two ways. It is used by the stack as
721  *  init entry point in network interface structure. It is also used
722  *  by the driver as a hw/sw initialization routine to get to a
723  *  consistent state.
724  *
725  *  return 0 on success, positive on failure
726  **********************************************************************/
727 
void
em_init(void *arg)
{
	int		s;
	struct ifnet   *ifp;
	struct em_softc * sc = arg;

	INIT_DEBUGOUT("em_init: begin");

	s = splimp();

	/* Quiesce before reprogramming the hardware. */
	em_stop(sc);

	/* Initialize the hardware */
	if (em_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		splx(s);
		return;
	}

	/* em_enable_vlans(sc); */

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		       sc->sc_dv.dv_xname);
		em_stop(sc);
		splx(s);
		return;
	}
	em_initialize_transmit_unit(sc);

	/* Setup Multicast table */
	em_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		       sc->sc_dv.dv_xname);
		em_stop(sc);
		splx(s);
		return;
	}
	em_initialize_receive_unit(sc);

	/* Mark the interface up and ready to accept packets. */
	ifp = &sc->interface_data.ac_if;
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

#ifdef __FreeBSD__
	if (sc->hw.mac_type >= em_82543) {
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
	}
#endif /* __FreeBSD__ */

	/* Restart the periodic link/statistics timer. */
	timeout_add(&sc->timer_handle, 2*hz);
	em_clear_hw_cntrs(&sc->hw);
#ifdef DEVICE_POLLING
        /*
         * Only enable interrupts if we are not polling, make sure
         * they are off otherwise.
         */
        if (ifp->if_ipending & IFF_POLLING)
                em_disable_intr(sc);
        else
#endif /* DEVICE_POLLING */
		em_enable_intr(sc);

	splx(s);
	return;
}
803 
804 
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;

/*
 * Polling-mode service routine (DEVICE_POLLING only): handles link
 * status checks, RX/TX processing, and restarting the transmit queue
 * in place of the interrupt handler.
 */
static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct em_softc *sc = ifp->if_softc;
	u_int32_t reg_icr;

	if (cmd == POLL_DEREGISTER) {	    /* final call, enable interrupts */
		em_enable_intr(sc);
		return;
	}
	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&sc->hw, ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			/* Link change: refresh state and reschedule timer. */
			untimeout(em_local_timer, sc, sc->timer_handle);
			sc->hw.get_link_status = 1;
			em_check_for_link(&sc->hw);
			em_print_link_status(sc);
			sc->timer_handle = timeout(em_local_timer, sc, 2*hz);
		}
	}
	if (ifp->if_flags & IFF_RUNNING) {
		em_process_receive_interrupts(sc, count);
		em_clean_transmit_interrupts(sc);
	}

	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
		em_start(ifp);
}
#endif /* DEVICE_POLLING */
837 
838 /*********************************************************************
839  *
840  *  Interrupt Service routine
841  *
842  **********************************************************************/
843 int
844 em_intr(void *arg)
845 {
846 	u_int32_t	loop_cnt = EM_MAX_INTR;
847 	u_int32_t	reg_icr;
848 	struct ifnet	*ifp;
849 	struct em_softc *sc = arg;
850 
851 	ifp = &sc->interface_data.ac_if;
852 
853 #ifdef DEVICE_POLLING
854 	if (ifp->if_ipending & IFF_POLLING)
855 		return;
856 
857 	if (ether_poll_register(em_poll, ifp)) {
858 		em_disable_intr(sc);
859 		em_poll(ifp, 0, 1);
860 		return;
861 	}
862 #endif /* DEVICE_POLLING */
863 	reg_icr = E1000_READ_REG(&sc->hw, ICR);
864 	if (!reg_icr) {
865 		return (0);
866 	}
867 
868 	/* Link status change */
869 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
870 		timeout_del(&sc->timer_handle);
871 		sc->hw.get_link_status = 1;
872 		em_check_for_link(&sc->hw);
873 		em_print_link_status(sc);
874 		timeout_add(&sc->timer_handle, 2*hz);
875 	}
876 
877 	while (loop_cnt > 0) {
878 		if (ifp->if_flags & IFF_RUNNING) {
879 			em_process_receive_interrupts(sc, -1);
880 			em_clean_transmit_interrupts(sc);
881 		}
882 		loop_cnt--;
883 	}
884 
885 	if (ifp->if_flags & IFF_RUNNING && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
886 		em_start(ifp);
887 
888 	return (1);
889 }
890 
891 
892 
893 /*********************************************************************
894  *
895  *  Media Ioctl callback
896  *
897  *  This routine is called whenever the user queries the status of
898  *  the interface using ifconfig.
899  *
900  **********************************************************************/
901 void
902 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
903 {
904 	struct em_softc * sc= ifp->if_softc;
905 
906 	INIT_DEBUGOUT("em_media_status: begin");
907 
908 	em_check_for_link(&sc->hw);
909 	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
910 		if (sc->link_active == 0) {
911 			em_get_speed_and_duplex(&sc->hw,
912 						&sc->link_speed,
913 						&sc->link_duplex);
914 			sc->link_active = 1;
915 		}
916 	} else {
917 		if (sc->link_active == 1) {
918 			sc->link_speed = 0;
919 			sc->link_duplex = 0;
920 			sc->link_active = 0;
921 		}
922 	}
923 
924 	ifmr->ifm_status = IFM_AVALID;
925 	ifmr->ifm_active = IFM_ETHER;
926 
927 	if (!sc->link_active)
928 		return;
929 
930 	ifmr->ifm_status |= IFM_ACTIVE;
931 
932 	if (sc->hw.media_type == em_media_type_fiber) {
933 		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
934 	} else {
935 		switch (sc->link_speed) {
936 		case 10:
937 			ifmr->ifm_active |= IFM_10_T;
938 			break;
939 		case 100:
940 			ifmr->ifm_active |= IFM_100_TX;
941 			break;
942 		case 1000:
943 			ifmr->ifm_active |= IFM_1000_T;
944 			break;
945 		}
946 		if (sc->link_duplex == FULL_DUPLEX)
947 			ifmr->ifm_active |= IFM_FDX;
948 		else
949 			ifmr->ifm_active |= IFM_HDX;
950 	}
951 	return;
952 }
953 
954 /*********************************************************************
955  *
956  *  Media Ioctl callback
957  *
958  *  This routine is called when the user changes speed/duplex using
959  *  media/mediopt option with ifconfig.
960  *
961  **********************************************************************/
962 int
963 em_media_change(struct ifnet *ifp)
964 {
965 	struct em_softc * sc = ifp->if_softc;
966 	struct ifmedia	*ifm = &sc->media;
967 
968 	INIT_DEBUGOUT("em_media_change: begin");
969 
970 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
971 		return(EINVAL);
972 
973 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
974 	case IFM_AUTO:
975 		sc->hw.autoneg = DO_AUTO_NEG;
976 		sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
977 		break;
978 	case IFM_1000_SX:
979 	case IFM_1000_T:
980 		sc->hw.autoneg = DO_AUTO_NEG;
981 		sc->hw.autoneg_advertised = ADVERTISE_1000_FULL;
982 		break;
983 	case IFM_100_TX:
984 		sc->hw.autoneg = FALSE;
985 		sc->hw.autoneg_advertised = 0;
986 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
987 			sc->hw.forced_speed_duplex = em_100_full;
988 		else
989 			sc->hw.forced_speed_duplex	= em_100_half;
990 		break;
991 	case IFM_10_T:
992 		sc->hw.autoneg = FALSE;
993 		sc->hw.autoneg_advertised = 0;
994 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
995 			sc->hw.forced_speed_duplex = em_10_full;
996 		else
997 			sc->hw.forced_speed_duplex	= em_10_half;
998 		break;
999 	default:
1000 		printf("%s: Unsupported media type\n", sc->sc_dv.dv_xname);
1001 	}
1002 
1003 	em_init(sc);
1004 
1005 	return(0);
1006 }
1007 
#ifdef __FreeBSD__
/*
 * bus_dmamap_load() callback (FreeBSD only): record the DMA segment
 * list of a mapped transmit mbuf into the per-packet em_q so em_encap
 * can fill the TX descriptors.  No-op if the load reported an error.
 */
void
em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct em_q *q = arg;

	if (error)
		return;
	EM_KASSERT(nsegs <= EM_MAX_SCATTER,
		("Too many DMA segments returned when mapping tx packet"));
	q->nsegs = nsegs;
	bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
}
#endif /* __FreeBSD__ */
1022 
1023 #define EM_FIFO_HDR		 0x10
1024 #define EM_82547_PKT_THRESH	 0x3e0
1025 #define EM_82547_TX_FIFO_SIZE	 0x2800
1026 #define EM_82547_TX_FIFO_BEGIN	 0xf00
1027 /*********************************************************************
1028  *
1029  *  This routine maps the mbufs to tx descriptors.
1030  *
1031  *  return 0 on success, positive on failure
1032  **********************************************************************/
int
em_encap(struct em_softc *sc, struct mbuf *m_head)
{
	u_int32_t	txd_upper;
	u_int32_t	txd_lower;
	int		i, j, error;
#if NVLAN > 0
	struct ifvlan *ifv = NULL;
#endif
	struct em_q	q;

	struct em_buffer   *tx_buffer = NULL;
	struct em_tx_desc *current_tx_desc = NULL;
	/*struct ifnet	 *ifp = &sc->interface_data.ac_if;*/

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (sc->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_clean_transmit_interrupts(sc);
		/* Still starved after reclaiming: caller must retry later. */
		if (sc->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
			sc->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	if (bus_dmamap_create(sc->txtag, MCLBYTES, 32, 0, 0, BUS_DMA_NOWAIT,
	    &q.map)) {
		sc->no_tx_map_avail++;
		return (ENOMEM);
	}
	error = bus_dmamap_load_mbuf(sc->txtag, q.map,
				     m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		sc->no_tx_dma_setup++;
		bus_dmamap_destroy(sc->txtag, q.map);
		return (error);
	}
	EM_KASSERT(q.map->dm_nsegs!= 0, ("em_encap: empty packet"));

	/* Not enough free descriptors to hold every DMA segment. */
	if (q.map->dm_nsegs > sc->num_tx_desc_avail) {
		sc->no_tx_desc_avail2++;
		bus_dmamap_destroy(sc->txtag, q.map);
		return (ENOBUFS);
	}


#ifdef __FreeBSD__
	if (ifp->if_hwassist > 0) {
		em_transmit_checksum_setup(sc,	m_head,
					   &txd_upper, &txd_lower);
	} else
#endif /* __FreeBSD__ */
		txd_upper = txd_lower = 0;


	/* Find out if we are in vlan mode */
#if NVLAN > 0
	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;
#endif

	/* One TX descriptor per DMA segment, wrapping the ring index. */
	i = sc->next_avail_tx_desc;
	for (j = 0; j < q.map->dm_nsegs; j++) {
		tx_buffer = &sc->tx_buffer_area[i];
		current_tx_desc = &sc->tx_desc_base[i];

		current_tx_desc->buffer_addr = htole64(q.map->dm_segs[j].ds_addr);
		current_tx_desc->lower.data = htole32(
		    sc->txd_cmd | txd_lower | q.map->dm_segs[j].ds_len);
		current_tx_desc->upper.data = htole32(txd_upper);

		if (++i == sc->num_tx_desc)
			i = 0;

		tx_buffer->m_head = NULL;
	}

	sc->num_tx_desc_avail -= q.map->dm_nsegs;
	sc->next_avail_tx_desc = i;

#if NVLAN > 0
	if (ifv != NULL) {
		/* Set the vlan id */
		current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);

		/* Tell hardware to add tag */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}
#endif

	/* The mbuf and the map are owned by the chain's last descriptor. */
	tx_buffer->m_head = m_head;
	tx_buffer->map = q.map;
	bus_dmamap_sync(sc->txtag, q.map, 0, q.map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
	 * that this frame is available to transmit.
	 */
	if (sc->hw.mac_type == em_82547 &&
	    sc->link_duplex == HALF_DUPLEX) {
		/* 82547 half-duplex: use the TX FIFO hang workaround path. */
		em_82547_move_tail(sc);
	} else {
		E1000_WRITE_REG(&sc->hw, TDT, i);
		if (sc->hw.mac_type == em_82547) {
			em_82547_update_fifo_head(sc, m_head->m_pkthdr.len);
		}
	}

	return (0);
}
1156 
1157 /*********************************************************************
1158  *
1159  * 82547 workaround to avoid controller hang in half-duplex environment.
1160  * The workaround is to avoid queuing a large packet that would span
1161  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1162  * in this case. We do that only when FIFO is queiced.
1163  *
1164  **********************************************************************/
void
em_82547_move_tail(void *arg)
{
	int s;
	struct em_softc *sc = arg;
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	s = splimp();
	/*
	 * Walk from the hardware tail toward the software tail one
	 * complete packet (up to an EOP descriptor) at a time, exposing
	 * each packet to the chip only when it will not straddle the
	 * internal TX FIFO wrap boundary.
	 */
	hw_tdt = E1000_READ_REG(&sc->hw, TDT);
	sw_tdt = sc->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &sc->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == sc->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			if (em_82547_fifo_workaround(sc, length)) {
				/* FIFO not drained yet: retry from the
				 * timeout rather than risk a hang. */
				sc->tx_fifo_wrk++;
				timeout_add(&sc->tx_fifo_timer_handle, 1);
				splx(s);
				return;
			}
			else {
				/* Safe: hand this packet to the chip and
				 * advance our software FIFO head. */
				E1000_WRITE_REG(&sc->hw, TDT, hw_tdt);
				em_82547_update_fifo_head(sc, length);
				length = 0;
			}
		}
	}
	splx(s);
	return;
}
1204 
1205 int
1206 em_82547_fifo_workaround(struct em_softc *sc, int len)
1207 {
1208 	int fifo_space, fifo_pkt_len;
1209 
1210 	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1211 
1212 	if (sc->link_duplex == HALF_DUPLEX) {
1213 		fifo_space = EM_82547_TX_FIFO_SIZE - sc->tx_fifo_head;
1214 
1215 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1216 			if (em_82547_tx_fifo_reset(sc)) {
1217 				return(0);
1218 			}
1219 			else {
1220 				return(1);
1221 			}
1222 		}
1223 	}
1224 
1225 	return(0);
1226 }
1227 
1228 void
1229 em_82547_update_fifo_head(struct em_softc *sc, int len)
1230 {
1231 	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1232 
1233 	/* tx_fifo_head is always 16 byte aligned */
1234 	sc->tx_fifo_head += fifo_pkt_len;
1235 	if (sc->tx_fifo_head >= EM_82547_TX_FIFO_SIZE) {
1236 		sc->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
1237 	}
1238 
1239 	return;
1240 }
1241 
1242 
1243 int
1244 em_82547_tx_fifo_reset(struct em_softc *sc)
1245 {
1246 	uint32_t tctl;
1247 
1248 	if ( (E1000_READ_REG(&sc->hw, TDT) ==
1249 	      E1000_READ_REG(&sc->hw, TDH)) &&
1250 	     (E1000_READ_REG(&sc->hw, TDFT) ==
1251 	      E1000_READ_REG(&sc->hw, TDFH)) &&
1252 	     (E1000_READ_REG(&sc->hw, TDFTS) ==
1253 	      E1000_READ_REG(&sc->hw, TDFHS)) &&
1254 	     (E1000_READ_REG(&sc->hw, TDFPC) == 0)) {
1255 
1256 		/* Disable TX unit */
1257 		tctl = E1000_READ_REG(&sc->hw, TCTL);
1258 		E1000_WRITE_REG(&sc->hw, TCTL, tctl & ~E1000_TCTL_EN);
1259 
1260 		/* Reset FIFO pointers */
1261 		E1000_WRITE_REG(&sc->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
1262 		E1000_WRITE_REG(&sc->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
1263 		E1000_WRITE_REG(&sc->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
1264 		E1000_WRITE_REG(&sc->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);
1265 
1266 		/* Re-enable TX unit */
1267 		E1000_WRITE_REG(&sc->hw, TCTL, tctl);
1268 		E1000_WRITE_FLUSH(&sc->hw);
1269 
1270 		sc->tx_fifo_head = 0;
1271 		sc->tx_fifo_reset++;
1272 
1273 		return(TRUE);
1274 	}
1275 	else {
1276 		return(FALSE);
1277 	}
1278 }
1279 
1280 void
1281 em_set_promisc(struct em_softc * sc)
1282 {
1283 
1284 	u_int32_t	reg_rctl;
1285 	struct ifnet   *ifp = &sc->interface_data.ac_if;
1286 
1287 	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1288 
1289 	if (ifp->if_flags & IFF_PROMISC) {
1290 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1291 		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1292 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1293 		reg_rctl |= E1000_RCTL_MPE;
1294 		reg_rctl &= ~E1000_RCTL_UPE;
1295 		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1296 	}
1297 
1298 	return;
1299 }
1300 
1301 void
1302 em_disable_promisc(struct em_softc * sc)
1303 {
1304 	u_int32_t	reg_rctl;
1305 
1306 	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
1307 
1308 	reg_rctl &=  (~E1000_RCTL_UPE);
1309 	reg_rctl &=  (~E1000_RCTL_MPE);
1310 	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
1311 
1312 	return;
1313 }
1314 
1315 
1316 /*********************************************************************
1317  *  Multicast Update
1318  *
1319  *  This routine is called whenever multicast address list is updated.
1320  *
1321  **********************************************************************/
1322 
void
em_set_multi(struct em_softc * sc)
{
	u_int32_t reg_rctl = 0;
	u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
#ifdef __FreeBSD__
	struct ifmultiaddr  *ifma;
#endif
	int mcnt = 0;
#ifdef __FreeBSD__
	struct ifnet   *ifp = &sc->interface_data.ac_if;
#endif

	IOCTL_DEBUGOUT("em_set_multi: begin");

	/*
	 * The 82542 rev2.0 must be held in receive reset (with MWI
	 * disabled) while the multicast table is rewritten.
	 */
	if (sc->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
			em_pci_clear_mwi(&sc->hw);
		}
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

#ifdef __FreeBSD__
	/* Flatten the interface's multicast list into mta[]. */
#if __FreeBSD_version < 500000
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#else
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#endif
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}
#endif /* __FreeBSD__ */
	/* NOTE(review): on OpenBSD mcnt stays 0 here, so the hardware
	 * filter is programmed empty -- confirm multicast membership is
	 * handled via another path on this platform. */

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		/* Too many groups for the exact filter: accept all multicast. */
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
	} else
		em_mc_addr_list_update(&sc->hw, mta, mcnt, 0);

	/* Take the 82542 rev2.0 back out of reset and restore MWI. */
	if (sc->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) {
			em_pci_set_mwi(&sc->hw);
		}
	}

	return;
}
1384 
1385 
1386 /*********************************************************************
1387  *  Timer routine
1388  *
1389  *  This routine checks for link status and updates statistics.
1390  *
1391  **********************************************************************/
1392 
1393 void
1394 em_local_timer(void *arg)
1395 {
1396 	int s;
1397 	struct ifnet   *ifp;
1398 	struct em_softc * sc = arg;
1399 	ifp = &sc->interface_data.ac_if;
1400 
1401 	s = splimp();
1402 
1403 	em_check_for_link(&sc->hw);
1404 	em_print_link_status(sc);
1405 	em_update_stats_counters(sc);
1406 	if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING) {
1407 		em_print_hw_stats(sc);
1408 	}
1409 	em_smartspeed(sc);
1410 
1411 	timeout_add(&sc->timer_handle, 2*hz);
1412 
1413 	splx(s);
1414 	return;
1415 }
1416 
1417 void
1418 em_print_link_status(struct em_softc * sc)
1419 {
1420 	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
1421 		if (sc->link_active == 0) {
1422 			em_get_speed_and_duplex(&sc->hw,
1423 						&sc->link_speed,
1424 						&sc->link_duplex);
1425 			sc->link_active = 1;
1426 			sc->smartspeed = 0;
1427 		}
1428 	} else {
1429 		if (sc->link_active == 1) {
1430 			sc->link_speed = 0;
1431 			sc->link_duplex = 0;
1432 			sc->link_active = 0;
1433 		}
1434 	}
1435 
1436 	return;
1437 }
1438 
1439 /*********************************************************************
1440  *
1441  *  This routine disables all traffic on the sc by issuing a
1442  *  global reset on the MAC and deallocates TX/RX buffers.
1443  *
1444  **********************************************************************/
1445 
1446 void
1447 em_stop(void *arg)
1448 {
1449 	struct ifnet   *ifp;
1450 	struct em_softc * sc = arg;
1451 	ifp = &sc->interface_data.ac_if;
1452 
1453 	INIT_DEBUGOUT("em_stop: begin\n");
1454 	em_disable_intr(sc);
1455 	em_reset_hw(&sc->hw);
1456 	timeout_del(&sc->timer_handle);
1457 	timeout_del(&sc->tx_fifo_timer_handle);
1458 	em_free_transmit_structures(sc);
1459 	em_free_receive_structures(sc);
1460 
1461 
1462 	/* Tell the stack that the interface is no longer active */
1463 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1464 
1465 	return;
1466 }
1467 
1468 
1469 /*********************************************************************
1470  *
1471  *  Determine hardware revision.
1472  *
1473  **********************************************************************/
1474 void
1475 em_identify_hardware(struct em_softc * sc)
1476 {
1477 	u_int32_t reg;
1478 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1479 
1480 	/* Make sure our PCI config space has the necessary stuff set */
1481 	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
1482 					    PCI_COMMAND_STATUS_REG);
1483 	if (!((sc->hw.pci_cmd_word & PCI_COMMAND_MASTER_ENABLE) &&
1484 	      (sc->hw.pci_cmd_word & PCI_COMMAND_MEM_ENABLE))) {
1485 		printf("%s: Memory Access and/or Bus Master bits were not set!\n",
1486 		       sc->sc_dv.dv_xname);
1487 		sc->hw.pci_cmd_word |=
1488 		(PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE);
1489 		pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1490 			       sc->hw.pci_cmd_word);
1491 	}
1492 
1493 	/* Save off the information about this board */
1494 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1495 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1496 
1497 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1498 	sc->hw.revision_id = PCI_REVISION(reg);
1499 
1500 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1501 
1502 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1503 	sc->hw.subsystem_id = PCI_PRODUCT(reg);
1504 
1505 	/* Identify the MAC */
1506 	if (em_set_mac_type(&sc->hw))
1507 		printf("%s: Unknown MAC Type\n", sc->sc_dv.dv_xname);
1508 
1509 	return;
1510 }
1511 
/*
 * Map the memory-mapped register BAR (and, for MACs newer than the
 * 82543, the I/O BAR), then map and establish the interrupt.
 * Returns 0 on success, ENXIO on any failure.
 */
int
em_allocate_pci_resources(struct em_softc * sc)
{
	int		i, val, rid;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba isn't memory");
		return (ENXIO);
	}
	if (pci_mapreg_map(pa, EM_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.em_membase, &sc->osdep.em_memsize, 0)) {
		printf(": can't find mem space");
		return (ENXIO);
	}

	if (sc->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ? */
		/* Scan up to five BARs for one with the I/O-space bit.
		 * NOTE(review): if none matches, rid ends up past the
		 * last scanned BAR and the map below fails with ENXIO --
		 * confirm that is the intended error path. */
		rid = EM_MMBA;
		for (i = 0; i < 5; i++) {
			val = pci_conf_read(pa->pa_pc, pa->pa_tag, rid);
			if (val & 0x00000001) {
				sc->io_rid = rid;
				break;
			}
			rid += 4;
		}
		if (pci_mapreg_map(pa, rid, PCI_MAPREG_TYPE_IO, 0,
				   &sc->osdep.em_iobtag,
				   &sc->osdep.em_iobhandle,
				   &sc->osdep.em_iobase,
				   &sc->osdep.em_iosize, 0)) {
			printf(": can't find io space");
			return (ENXIO);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return (ENXIO);
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, em_intr, sc,
					      sc->sc_dv.dv_xname);
	if (sc->sc_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return (ENXIO);
	}
	printf(": %s", intrstr);

	/* Hand the shared e1000 code a pointer back to our osdep state. */
	sc->hw.back = &sc->osdep;

	return(0);
}
1575 
1576 void
1577 em_free_pci_resources(struct em_softc* sc)
1578 {
1579 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1580 	pci_chipset_tag_t	pc = pa->pa_pc;
1581 
1582 	if(sc->sc_intrhand)
1583 		pci_intr_disestablish(pc, sc->sc_intrhand);
1584 	sc->sc_intrhand = 0;
1585 
1586 	if(sc->osdep.em_iobase)
1587 		bus_space_unmap(sc->osdep.em_iobtag, sc->osdep.em_iobhandle,
1588 				sc->osdep.em_iosize);
1589 	sc->osdep.em_iobase = 0;
1590 
1591 	if(sc->osdep.em_membase)
1592 		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
1593 				sc->osdep.em_memsize);
1594 	sc->osdep.em_membase = 0;
1595 
1596 }
1597 
1598 /*********************************************************************
1599  *
1600  *  Initialize the hardware to a configuration as specified by the
1601  *  em_softc structure. The controller is reset, the EEPROM is
1602  *  verified, the MAC address is set, then the shared initialization
1603  *  routines are called.
1604  *
1605  **********************************************************************/
1606 int
1607 em_hardware_init(struct em_softc * sc)
1608 {
1609 	/* Issue a global reset */
1610 	em_reset_hw(&sc->hw);
1611 
1612 	/* When hardware is reset, fifo_head is also reset */
1613 	sc->tx_fifo_head = 0;
1614 
1615 	/* Make sure we have a good EEPROM before we read from it */
1616 	if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1617 		printf("%s: The EEPROM Checksum Is Not Valid\n",
1618 		       sc->sc_dv.dv_xname);
1619 		return(EIO);
1620 	}
1621 
1622 	if (em_read_part_num(&sc->hw, &(sc->part_num)) < 0) {
1623 		printf("%s: EEPROM read error while reading part number\n",
1624 		       sc->sc_dv.dv_xname);
1625 		return(EIO);
1626 	}
1627 
1628 	if (em_init_hw(&sc->hw) < 0) {
1629 		printf("%s: Hardware Initialization Failed",
1630 		       sc->sc_dv.dv_xname);
1631 		return(EIO);
1632 	}
1633 
1634 	em_check_for_link(&sc->hw);
1635 	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU)
1636 		sc->link_active = 1;
1637 	else
1638 		sc->link_active = 0;
1639 
1640 	if (sc->link_active) {
1641 		em_get_speed_and_duplex(&sc->hw,
1642 					&sc->link_speed,
1643 					&sc->link_duplex);
1644 	} else {
1645 		sc->link_speed = 0;
1646 		sc->link_duplex = 0;
1647 	}
1648 
1649 	return(0);
1650 }
1651 
1652 /*********************************************************************
1653  *
1654  *  Setup networking device structure and register an interface.
1655  *
1656  **********************************************************************/
1657 void
1658 em_setup_interface(struct em_softc * sc)
1659 {
1660 	struct ifnet   *ifp;
1661 	INIT_DEBUGOUT("em_setup_interface: begin");
1662 
1663 	ifp = &sc->interface_data.ac_if;
1664 	ifp->if_mtu = ETHERMTU;
1665 	ifp->if_output = ether_output;
1666 	ifp->if_baudrate = 1000000000;
1667 #ifdef __FreeBSD__
1668 	ifp->if_init =	em_init;
1669 #endif
1670 	ifp->if_softc = sc;
1671 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1672 	ifp->if_ioctl = em_ioctl;
1673 	ifp->if_start = em_start;
1674 	ifp->if_watchdog = em_watchdog;
1675 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1676 	IFQ_SET_READY(&ifp->if_snd);
1677 
1678 	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
1679 
1680 #ifdef __FreeBSD__
1681 	if (sc->hw.mac_type >= em_82543) {
1682 		ifp->if_capabilities = IFCAP_HWCSUM;
1683 		ifp->if_capenable = ifp->if_capabilities;
1684 	}
1685 
1686 	/*
1687 	 * Tell the upper layer(s) we support long frames.
1688 	 */
1689 	ifp->if_data.ifi_hdrlen = sizeof(struct ehter_vlan_header);
1690 #if __FreeBSD_version >= 500000
1691 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1692 #endif
1693 #endif /* __FreeBSD__ */
1694 
1695 #ifdef __OpenBSD__
1696 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1697 #endif
1698 
1699 	/*
1700 	 * Specify the media types supported by this adapter and register
1701 	 * callbacks to update media and link information
1702 	 */
1703 	ifmedia_init(&sc->media, IFM_IMASK, em_media_change,
1704 		     em_media_status);
1705 	if (sc->hw.media_type == em_media_type_fiber) {
1706 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1707 			    0, NULL);
1708 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX,
1709 			    0, NULL);
1710 	} else {
1711 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1712 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1713 			    0, NULL);
1714 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX,
1715 			    0, NULL);
1716 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1717 			    0, NULL);
1718 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1719 			    0, NULL);
1720 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1721 	}
1722 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1723 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1724 
1725 	if_attach(ifp);
1726 	ether_ifattach(ifp);
1727 
1728 	return;
1729 }
1730 
1731 
1732 /*********************************************************************
1733  *
1734  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1735  *
1736  **********************************************************************/
void
em_smartspeed(struct em_softc *sc)
{
	uint16_t phy_tmp;

	/* Only applies while autonegotiating 1000baseT-FDX on an IGP
	 * PHY and the link has not yet come up. */
	if(sc->link_active || (sc->hw.phy_type != em_phy_igp) ||
	   !sc->hw.autoneg || !(sc->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if(sc->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			em_read_phy_reg(&sc->hw, PHY_1000T_CTRL,
					&phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				/* Stop forcing master/slave and restart
				 * autonegotiation to clear the fault. */
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				em_write_phy_reg(&sc->hw,
						    PHY_1000T_CTRL, phy_tmp);
				sc->smartspeed++;
				if(sc->hw.autoneg &&
				   !em_phy_setup_autoneg(&sc->hw) &&
				   !em_read_phy_reg(&sc->hw, PHY_CTRL,
						       &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					em_write_phy_reg(&sc->hw,
							 PHY_CTRL, phy_tmp);
				}
			}
		}
		return;
	} else if(sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		em_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		em_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
		if(sc->hw.autoneg &&
		   !em_phy_setup_autoneg(&sc->hw) &&
		   !em_read_phy_reg(&sc->hw, PHY_CTRL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			em_write_phy_reg(&sc->hw, PHY_CTRL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(sc->smartspeed++ == EM_SMARTSPEED_MAX)
		sc->smartspeed = 0;

	return;
}
1791 
1792 
1793 /*
1794  * Manage DMA'able memory.
1795  */
1796 
#ifdef __FreeBSD__
/*
 * FreeBSD bus_dmamap_load() callback: stores the single segment's bus
 * address through the opaque arg pointer.  (Compiled on FreeBSD only.)
 */
void
em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;
	*(bus_addr_t*) arg = segs->ds_addr;
	return;
}
#endif /* __FreeBSD__ */
1807 
1808 int
1809 em_dma_malloc(struct em_softc *sc, bus_size_t size,
1810 	struct em_dma_alloc *dma, int mapflags)
1811 {
1812 	int r;
1813 
1814 #ifdef __FreeBSD__
1815 	r = bus_dma_tag_create(NULL,			/* parent */
1816 			       PAGE_SIZE, 0,		/* alignment, bounds */
1817 			       BUS_SPACE_MAXADDR,	/* lowaddr */
1818 			       BUS_SPACE_MAXADDR,	/* highaddr */
1819 			       NULL, NULL,		/* filter, filterarg */
1820 			       size,			/* maxsize */
1821 			       1,			/* nsegments */
1822 			       size,			/* maxsegsize */
1823 			       BUS_DMA_ALLOCNOW,	/* flags */
1824 			       &dma->dma_tag);
1825 	if (r != 0) {
1826 		printf("%s: em_dma_malloc: bus_dma_tag_create failed; "
1827 			"error %u\n", sc->sc_dv.dv_xname, r);
1828 		goto fail_0;
1829 	}
1830 
1831 	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
1832 #endif /* __FreeBSD__ */
1833 	dma->dma_tag = sc->osdep.em_pa.pa_dmat;
1834 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1835 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1836 
1837 	if (r != 0) {
1838 		printf("%s: em_dma_malloc: bus_dmamap_create failed; "
1839 			"error %u\n", sc->sc_dv.dv_xname, r);
1840 		goto fail_0;
1841 	}
1842 
1843 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1844 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1845 	if (r != 0) {
1846 		printf("%s: em_dma_malloc: bus_dmammem_alloc failed; "
1847 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
1848 			(unsigned long)size, r);
1849 		goto fail_1;
1850 	}
1851 
1852 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1853 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1854 	if (r != 0) {
1855 		printf("%s: em_dma_malloc: bus_dmammem_map failed; "
1856 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
1857 			(unsigned long)size, r);
1858 		goto fail_2;
1859 	}
1860 
1861 	r = bus_dmamap_load(sc->osdep.em_pa.pa_dmat, dma->dma_map,
1862 			    dma->dma_vaddr,
1863 			    size,
1864 			    NULL,
1865 			    mapflags | BUS_DMA_NOWAIT);
1866 	if (r != 0) {
1867 		printf("%s: em_dma_malloc: bus_dmamap_load failed; "
1868 			"error %u\n", sc->sc_dv.dv_xname, r);
1869 		goto fail_3;
1870 	}
1871 
1872 	dma->dma_size = size;
1873 	return (0);
1874 
1875 /* fail_4: */
1876 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1877 fail_3:
1878 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1879 fail_2:
1880 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1881 fail_1:
1882 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1883 	/* bus_dma_tag_destroy(dma->dma_tag); */
1884 fail_0:
1885 	dma->dma_map = NULL;
1886 	/* dma->dma_tag = NULL; */
1887 	return (r);
1888 }
1889 
/*
 * Release everything em_dma_malloc() set up, in strict reverse order:
 * unload the map, unmap the kva, free the segment, destroy the map.
 */
void
em_dma_free(struct em_softc *sc, struct em_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	/* bus_dma_tag_destroy(dma->dma_tag); */
}
1899 
1900 
1901 /*********************************************************************
1902  *
1903  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1904  *  the information needed to transmit a packet on the wire.
1905  *
1906  **********************************************************************/
1907 int
1908 em_allocate_transmit_structures(struct em_softc * sc)
1909 {
1910 	if (!(sc->tx_buffer_area =
1911 	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
1912 					     sc->num_tx_desc, M_DEVBUF,
1913 					     M_NOWAIT))) {
1914 		printf("%s: Unable to allocate tx_buffer memory\n",
1915 		       sc->sc_dv.dv_xname);
1916 		return ENOMEM;
1917 	}
1918 
1919 	bzero(sc->tx_buffer_area,
1920 	      sizeof(struct em_buffer) * sc->num_tx_desc);
1921 
1922 	return 0;
1923 }
1924 
1925 /*********************************************************************
1926  *
1927  *  Allocate and initialize transmit structures.
1928  *
1929  **********************************************************************/
int
em_setup_transmit_structures(struct em_softc* sc)
{
#ifdef __FreeBSD__
	/*
	 * Setup DMA descriptor areas.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
		    PAGE_SIZE, 0,	/* alignment, bounds */
		    BUS_SPACE_MAXADDR,       /* lowaddr */
		    BUS_SPACE_MAXADDR,       /* highaddr */
		    NULL, NULL,              /* filter, filterarg */
		    MCLBYTES * 8,            /* maxsize */
		    EM_MAX_SCATTER,          /* nsegments */
		    MCLBYTES * 8,            /* maxsegsize */
		    BUS_DMA_ALLOCNOW,        /* flags */
		    &sc->txtag)) {
		printf("%s: Unable to allocate TX DMA tag\n", sc->sc_dv.dv_xname);
		return (ENOMEM);
	}

#endif /* __FreeBSD__ */
	/* On OpenBSD the PCI attach args already provide the DMA tag. */
	sc->txtag = sc->osdep.em_pa.pa_dmat;

	if (em_allocate_transmit_structures(sc))
		return (ENOMEM);

	/* Clear the descriptor ring and reset both ring indices. */
	bzero((void *) sc->tx_desc_base,
	      (sizeof(struct em_tx_desc)) * sc->num_tx_desc);

	sc->next_avail_tx_desc = 0;
	sc->oldest_used_tx_desc = 0;

	/* Set number of descriptors available */
	sc->num_tx_desc_avail = sc->num_tx_desc;

	/* Set checksum context */
	sc->active_checksum_context = OFFLOAD_NONE;

	return (0);
}
1971 
1972 /*********************************************************************
1973  *
1974  *  Enable transmit unit.
1975  *
1976  **********************************************************************/
1977 void
1978 em_initialize_transmit_unit(struct em_softc * sc)
1979 {
1980 	u_int32_t	reg_tctl;
1981 	u_int32_t	reg_tipg = 0;
1982 	u_int64_t	bus_addr;
1983 
1984 	/* Setup the Base and Length of the Tx Descriptor Ring */
1985 	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
1986 	E1000_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
1987 	E1000_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
1988 	E1000_WRITE_REG(&sc->hw, TDLEN,
1989 			sc->num_tx_desc *
1990 			sizeof(struct em_tx_desc));
1991 
1992 	/* Setup the HW Tx Head and Tail descriptor pointers */
1993 	E1000_WRITE_REG(&sc->hw, TDH, 0);
1994 	E1000_WRITE_REG(&sc->hw, TDT, 0);
1995 
1996 
1997 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
1998 		     E1000_READ_REG(&sc->hw, TDBAL),
1999 		     E1000_READ_REG(&sc->hw, TDLEN));
2000 
2001 	/* Set the default values for the Tx Inter Packet Gap timer */
2002 	switch (sc->hw.mac_type) {
2003 	case em_82542_rev2_0:
2004 	case em_82542_rev2_1:
2005 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
2006 		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2007 		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2008 		break;
2009 	default:
2010 		if (sc->hw.media_type == em_media_type_fiber)
2011 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2012 		else
2013 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2014 			reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2015 			reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2016 	}
2017 
2018 	E1000_WRITE_REG(&sc->hw, TIPG, reg_tipg);
2019 	E1000_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
2020 	if(sc->hw.mac_type >= em_82540)
2021 		E1000_WRITE_REG(&sc->hw, TADV, sc->tx_abs_int_delay);
2022 
2023 	/* Program the Transmit Control Register */
2024 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2025 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2026 	if (sc->link_duplex == 1) {
2027 		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2028 	} else {
2029 		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2030 	}
2031 	E1000_WRITE_REG(&sc->hw, TCTL, reg_tctl);
2032 
2033 	/* Setup Transmit Descriptor Settings for this adapter */
2034 	sc->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
2035 
2036 	if (sc->tx_int_delay > 0)
2037 		sc->txd_cmd |= E1000_TXD_CMD_IDE;
2038 
2039 	return;
2040 }
2041 
2042 /*********************************************************************
2043  *
2044  *  Free all transmit related data structures.
2045  *
2046  **********************************************************************/
2047 void
2048 em_free_transmit_structures(struct em_softc* sc)
2049 {
2050 	struct em_buffer   *tx_buffer;
2051 	int		i;
2052 
2053 	INIT_DEBUGOUT("free_transmit_structures: begin");
2054 
2055 	if (sc->tx_buffer_area != NULL) {
2056 		tx_buffer = sc->tx_buffer_area;
2057 		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2058 			if (tx_buffer->m_head != NULL) {
2059 				bus_dmamap_unload(sc->txtag, tx_buffer->map);
2060 				bus_dmamap_destroy(sc->txtag, tx_buffer->map);
2061 				m_freem(tx_buffer->m_head);
2062 			}
2063 			tx_buffer->m_head = NULL;
2064 		}
2065 	}
2066 	if (sc->tx_buffer_area != NULL) {
2067 		free(sc->tx_buffer_area, M_DEVBUF);
2068 		sc->tx_buffer_area = NULL;
2069 	}
2070 	if (sc->txtag != NULL) {
2071 #ifdef __FreeBSD__
2072 		bus_dma_tag_destroy(sc->txtag);
2073 #endif
2074 		sc->txtag = NULL;
2075 	}
2076 	return;
2077 }
2078 
2079 /*********************************************************************
2080  *
2081  *  The offload context needs to be set when we transfer the first
2082  *  packet of a particular protocol (TCP/UDP). We change the
2083  *  context only if the protocol type changes.
2084  *
2085  **********************************************************************/
#ifdef __FreeBSD__
void
em_transmit_checksum_setup(struct em_softc * sc,
			   struct mbuf *mp,
			   u_int32_t *txd_upper,
			   u_int32_t *txd_lower)
{
	struct em_context_desc *TXD;
	struct em_buffer *tx_buffer;
	int curr_txd;

	/* Decide which offload (if any) the packet asks for and hand the
	 * matching data-descriptor option bits back to the caller.  If the
	 * hardware already holds the right context, nothing else to do. */
	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				sc->active_checksum_context = OFFLOAD_TCP_IP;

		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				sc->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			/* Some other csum flag set; no offload programmed. */
			*txd_upper = 0;
			*txd_lower = 0;
			return;
		}
	} else {
		/* No checksum offload requested for this packet. */
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	/* If we reach this point, the checksum offload context
	 * needs to be reset.
	 */
	curr_txd = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buffer_area[curr_txd];
	TXD = (struct em_context_desc *) &sc->tx_desc_base[curr_txd];

	/* IP header offsets: the ipcse value assumes a plain Ethernet +
	 * IPv4 header with no VLAN tag and no IP options (fixed
	 * sizeof(struct ip)) -- NOTE(review): confirm callers guarantee
	 * this. */
	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
	TXD->lower_setup.ip_fields.ipcso =
	ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
	TXD->lower_setup.ip_fields.ipcse =
	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);

	/* TCP/UDP checksum coverage starts after the IP header; tucse = 0
	 * means "to end of packet". */
	TXD->upper_setup.tcp_fields.tucss =
	ETHER_HDR_LEN + sizeof(struct ip);
	TXD->upper_setup.tcp_fields.tucse = htole16(0);

	/* Point tucso at the checksum field of the selected protocol. */
	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
		ETHER_HDR_LEN + sizeof(struct ip) +
		offsetof(struct tcphdr, th_sum);
	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->upper_setup.tcp_fields.tucso =
		ETHER_HDR_LEN + sizeof(struct ip) +
		offsetof(struct udphdr, uh_sum);
	}

	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(sc->txd_cmd | E1000_TXD_CMD_DEXT);

	/* The context descriptor consumes one ring slot but carries no
	 * mbuf, so the cleanup path has nothing to free for it. */
	tx_buffer->m_head = NULL;

	if (++curr_txd == sc->num_tx_desc)
		curr_txd = 0;

	sc->num_tx_desc_avail--;
	sc->next_avail_tx_desc = curr_txd;

	return;
}
#endif /* __FreeBSD__ */
2166 
2167 /**********************************************************************
2168  *
2169  *  Examine each tx_buffer in the used queue. If the hardware is done
2170  *  processing the packet then free associated resources. The
2171  *  tx_buffer is put back on the free queue.
2172  *
2173  **********************************************************************/
void
em_clean_transmit_interrupts(struct em_softc* sc)
{
	int s;
	int i, num_avail;
	struct em_buffer *tx_buffer;
	struct em_tx_desc   *tx_desc;
	struct ifnet   *ifp = &sc->interface_data.ac_if;

	/* Whole ring already free: nothing to reclaim. */
	if (sc->num_tx_desc_avail == sc->num_tx_desc)
		return;

	s = splimp();
#ifdef DBG_STATS
	sc->clean_tx_interrupts++;
#endif
	num_avail = sc->num_tx_desc_avail;
	i = sc->oldest_used_tx_desc;

	tx_buffer = &sc->tx_buffer_area[i];
	tx_desc = &sc->tx_desc_base[i];

	/* Walk forward from the oldest in-use slot while the hardware
	 * has marked descriptors done (DD bit set). */
	while(tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {

		tx_desc->upper.data = 0;
		num_avail++;

		/* Only slots that carried an mbuf own DMA resources;
		 * others (e.g. context descriptors) just get recycled. */
		if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_sync(sc->txtag, tx_buffer->map,
			    0, tx_buffer->map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->txtag, tx_buffer->map);
			bus_dmamap_destroy(sc->txtag, tx_buffer->map);

			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}

		/* Advance with wrap-around. */
		if (++i == sc->num_tx_desc)
			i = 0;

		tx_buffer = &sc->tx_buffer_area[i];
		tx_desc = &sc->tx_desc_base[i];
	}

	sc->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_OACTIVE to tell the stack
	 * that it is OK to send packets.
	 * If there are no pending descriptors, clear the timeout. Otherwise,
	 * if some descriptors have been freed, restart the timeout.
	 */
	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_flags &= ~IFF_OACTIVE;
		if (num_avail == sc->num_tx_desc)
			ifp->if_timer = 0;
		else if (num_avail == sc->num_tx_desc_avail)
			ifp->if_timer = EM_TX_TIMEOUT;
	}
	sc->num_tx_desc_avail = num_avail;
	splx(s);
	return;
}
2239 
2240 /*********************************************************************
2241  *
2242  *  Get a buffer from system mbuf buffer pool.
2243  *
2244  **********************************************************************/
int
em_get_buf(int i, struct em_softc *sc,
    struct mbuf *nmp)
{
	struct mbuf    *mp = nmp;
	struct em_buffer *rx_buffer;
	struct ifnet   *ifp;
	int error;

	ifp = &sc->interface_data.ac_if;

	/* nmp == NULL: allocate a fresh cluster mbuf for slot i.
	 * nmp != NULL: recycle the caller's mbuf (reset its length,
	 * data pointer and chain linkage). */
	if (mp == NULL) {
		MGETHDR(mp, M_DONTWAIT, MT_DATA);
		if (mp == NULL) {
			sc->mbuf_alloc_failed++;
			return(ENOBUFS);
		}
		MCLGET(mp, M_DONTWAIT);
		if ((mp->m_flags & M_EXT) == 0) {
			m_freem(mp);
			sc->mbuf_cluster_failed++;
			return(ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* For standard MTU, shift the payload by ETHER_ALIGN so the IP
	 * header ends up 32-bit aligned after the 14-byte Ethernet
	 * header. */
	if (ifp->if_mtu <= ETHERMTU) {
		m_adj(mp, ETHER_ALIGN);
	}

	rx_buffer = &sc->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the
	 * bus_dma machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load(sc->rxtag, rx_buffer->map,
	    mtod(mp, void *), mp->m_len, NULL,
	    0);
	if (error) {
		m_free(mp);
		return(error);
	}
	/* Hand the buffer's DMA address to the hardware descriptor and
	 * make the mapping device-visible before the NIC writes to it. */
	rx_buffer->m_head = mp;
	sc->rx_desc_base[i].buffer_addr = htole64(rx_buffer->map->dm_segs[0].ds_addr);
	bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
	    rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);

	return(0);
}
2299 
2300 /*********************************************************************
2301  *
2302  *  Allocate memory for rx_buffer structures. Since we use one
2303  *  rx_buffer per received packet, the maximum number of rx_buffer's
2304  *  that we'll need is equal to the number of receive descriptors
2305  *  that we've allocated.
2306  *
2307  **********************************************************************/
2308 int
2309 em_allocate_receive_structures(struct em_softc* sc)
2310 {
2311 	int		i, error;
2312 	struct em_buffer *rx_buffer;
2313 
2314 	if (!(sc->rx_buffer_area =
2315 	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
2316 					     sc->num_rx_desc, M_DEVBUF,
2317 					     M_NOWAIT))) {
2318 		printf("%s: Unable to allocate rx_buffer memory\n",
2319 		       sc->sc_dv.dv_xname);
2320 		return(ENOMEM);
2321 	}
2322 
2323 	bzero(sc->rx_buffer_area,
2324 	      sizeof(struct em_buffer) * sc->num_rx_desc);
2325 
2326 #ifdef __FreeBSD__
2327 	error = bus_dma_tag_create(NULL,                /* parent */
2328 				PAGE_SIZE, 0,            /* alignment, bounds */
2329 				BUS_SPACE_MAXADDR,       /* lowaddr */
2330 				BUS_SPACE_MAXADDR,       /* highaddr */
2331 				NULL, NULL,              /* filter, filterarg */
2332 				MCLBYTES,                /* maxsize */
2333 				1,                       /* nsegments */
2334 				MCLBYTES,                /* maxsegsize */
2335 				BUS_DMA_ALLOCNOW,        /* flags */
2336 				&sc->rxtag);
2337 	if (error != 0) {
2338 		printf("%s: em_allocate_receive_structures: "
2339 			"bus_dma_tag_create failed; error %u\n",
2340 			sc->sc_dv.dv_xname, error);
2341 		goto fail_0;
2342 	}
2343 #endif /* __FreeBSD__ */
2344 	sc->rxtag = sc->osdep.em_pa.pa_dmat;
2345 
2346 	rx_buffer = sc->rx_buffer_area;
2347 	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2348 		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
2349 					MCLBYTES, 0, BUS_DMA_NOWAIT,
2350 					&rx_buffer->map);
2351 		if (error != 0) {
2352 			printf("%s: em_allocate_receive_structures: "
2353 			    "bus_dmamap_create failed; error %u\n",
2354 			    sc->sc_dv.dv_xname, error);
2355 			goto fail_1;
2356 		}
2357 	}
2358 
2359 	for (i = 0; i < sc->num_rx_desc; i++) {
2360 		error = em_get_buf(i, sc, NULL);
2361 		if (error != 0) {
2362 			sc->rx_buffer_area[i].m_head = NULL;
2363 			sc->rx_desc_base[i].buffer_addr = 0;
2364 			return(error);
2365                 }
2366         }
2367 
2368         return(0);
2369 
2370 fail_1:
2371 	/* bus_dma_tag_destroy(sc->rxtag); */
2372 /* fail_0: */
2373 	sc->rxtag = NULL;
2374 	free(sc->rx_buffer_area, M_DEVBUF);
2375 	sc->rx_buffer_area = NULL;
2376 	return (error);
2377 }
2378 
2379 /*********************************************************************
2380  *
2381  *  Allocate and initialize receive structures.
2382  *
2383  **********************************************************************/
2384 int
2385 em_setup_receive_structures(struct em_softc * sc)
2386 {
2387 	bzero((void *) sc->rx_desc_base,
2388 	    (sizeof(struct em_rx_desc)) * sc->num_rx_desc);
2389 
2390 	if (em_allocate_receive_structures(sc))
2391 		return ENOMEM;
2392 
2393 	/* Setup our descriptor pointers */
2394 	sc->next_rx_desc_to_check = 0;
2395 	return(0);
2396 }
2397 
2398 /*********************************************************************
2399  *
2400  *  Enable receive unit.
2401  *
2402  **********************************************************************/
void
em_initialize_receive_unit(struct em_softc * sc)
{
	u_int32_t	reg_rctl;
#ifdef __FreeBSD__
	u_int32_t	reg_rxcsum;
#endif
	struct ifnet	*ifp;
	u_int64_t	bus_addr;

	ifp = &sc->interface_data.ac_if;

	/* Make sure receives are disabled while setting up the descriptor ring */
	E1000_WRITE_REG(&sc->hw, RCTL, 0);

	/* Set the Receive Delay Timer Register */
	E1000_WRITE_REG(&sc->hw, RDTR,
			sc->rx_int_delay | E1000_RDT_FPDB);

	if(sc->hw.mac_type >= em_82540) {
		/* 82540 and newer also have an absolute interrupt delay. */
		E1000_WRITE_REG(&sc->hw, RADV, sc->rx_abs_int_delay);

		/* Set the interrupt throttling rate.  Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR		1000000000/(MAX_INTS_PER_SEC * 256)
		E1000_WRITE_REG(&sc->hw, ITR, DEFAULT_ITR);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
	E1000_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
			sizeof(struct em_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	E1000_WRITE_REG(&sc->hw, RDH, 0);
	E1000_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);

	/* Setup the Receive Control Register */
	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (sc->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Store Bad Packets when TBI compatibility is on, so the TBI
	 * workaround in the receive path can inspect them. */
	if (sc->hw.tbi_compatibility_on == TRUE)
		reg_rctl |= E1000_RCTL_SBP;


	/* Pick the hardware buffer size; sizes above 2048 need the
	 * Buffer Size Extension bit and imply Long Packet Enable. */
	switch (sc->rx_buffer_len) {
	default:
	case EM_RXBUFFER_2048:
		reg_rctl |= E1000_RCTL_SZ_2048;
		break;
	case EM_RXBUFFER_4096:
		reg_rctl |= E1000_RCTL_SZ_4096|E1000_RCTL_BSEX|E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_8192:
		reg_rctl |= E1000_RCTL_SZ_8192|E1000_RCTL_BSEX|E1000_RCTL_LPE;
		break;
	case EM_RXBUFFER_16384:
		reg_rctl |= E1000_RCTL_SZ_16384|E1000_RCTL_BSEX|E1000_RCTL_LPE;
		break;
	}

	/* Long Packet Enable for jumbo MTUs. */
	if (ifp->if_mtu > ETHERMTU)
		reg_rctl |= E1000_RCTL_LPE;

#ifdef __FreeBSD__
	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((sc->hw.mac_type >= em_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		reg_rxcsum = E1000_READ_REG(&sc->hw, RXCSUM);
		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
	}
#endif /* __FreeBSD__ */

	/* Enable Receives */
	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);

	return;
}
2486 
2487 /*********************************************************************
2488  *
2489  *  Free receive related data structures.
2490  *
2491  **********************************************************************/
2492 void
2493 em_free_receive_structures(struct em_softc * sc)
2494 {
2495 	struct em_buffer   *rx_buffer;
2496 	int		i;
2497 
2498 	INIT_DEBUGOUT("free_receive_structures: begin");
2499 
2500 	if (sc->rx_buffer_area != NULL) {
2501 		rx_buffer = sc->rx_buffer_area;
2502 		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2503 			if (rx_buffer->map != NULL) {
2504 				bus_dmamap_unload(sc->rxtag, rx_buffer->map);
2505 				bus_dmamap_destroy(sc->rxtag, rx_buffer->map);
2506 			}
2507 			if (rx_buffer->m_head != NULL)
2508 				m_freem(rx_buffer->m_head);
2509 			rx_buffer->m_head = NULL;
2510 		}
2511 	}
2512 	if (sc->rx_buffer_area != NULL) {
2513 		free(sc->rx_buffer_area, M_DEVBUF);
2514 		sc->rx_buffer_area = NULL;
2515 	}
2516 	if (sc->rxtag != NULL) {
2517 		/* bus_dma_tag_destroy(sc->rxtag); */
2518 		sc->rxtag = NULL;
2519 	}
2520 	return;
2521 }
2522 
2523 /*********************************************************************
2524  *
2525  *  This routine executes in interrupt context. It replenishes
2526  *  the mbufs in the descriptor and sends data which has been
2527  *  dma'ed into host memory to upper layer.
2528  *
2529  *  We loop at most count times if count is > 0, or until done if
2530  *  count < 0.
2531  *
2532  *********************************************************************/
void
em_process_receive_interrupts(struct em_softc* sc, int count)
{
	struct ifnet	    *ifp;
	struct mbuf	    *mp;
#ifdef __FreeBSD__
	struct ether_header *eh;
#endif
	u_int8_t	    accept_frame = 0;
	u_int8_t	    eop = 0;
	u_int16_t	    len, desc_len;
	int		    i;

	/* Pointer to the receive descriptor being examined. */
	struct em_rx_desc   *current_desc;

	ifp = &sc->interface_data.ac_if;
	i = sc->next_rx_desc_to_check;
	current_desc = &sc->rx_desc_base[i];

	/* Nothing done by the hardware yet: bail out early. */
	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
#ifdef DBG_STATS
		sc->no_pkts_avail++;
#endif
		return;
	}

	while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {

		/* Detach the filled buffer for slot i from DMA before
		 * the CPU looks at it. */
		mp = sc->rx_buffer_area[i].m_head;
		bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
		    0, sc->rx_buffer_area[i].map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);

		accept_frame = 1;
		desc_len = letoh16(current_desc->length);
		/* Only whole packets (EOP fragments) count against the
		 * caller's "count" budget. */
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			count--;
			eop = 1;
			/* The last fragment includes the Ethernet CRC. */
			len = desc_len - ETHER_CRC_LEN;
		} else {
			eop = 0;
			len = desc_len;
		}

		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* TBI (fiber) workaround: some frames flagged with a
			 * carrier-extend error are actually good; TBI_ACCEPT
			 * decides, using the frame's last byte. */
			u_int8_t last_byte;
			u_int32_t pkt_len = desc_len;

			if (sc->fmp != NULL)
				pkt_len += sc->fmp->m_pkthdr.len;

			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);

			if (TBI_ACCEPT(&sc->hw, current_desc->status,
				       current_desc->errors,
				       pkt_len, last_byte)) {
				em_tbi_adjust_stats(&sc->hw,
						    &sc->stats,
						    pkt_len,
						    sc->hw.mac_addr);
				/* Presumably strips the carrier-extension
				 * byte counted in desc_len. */
				len--;
			}
			else {
				accept_frame = 0;
			}
		}

		if (accept_frame) {

			/* Replenish slot i with a fresh mbuf; on failure,
			 * recycle the old one back into the slot and drop
			 * the partially assembled packet.
			 * NOTE(review): only ENOBUFS is treated as failure;
			 * other em_get_buf() errors fall through -- confirm
			 * this is intended. */
			if (em_get_buf(i, sc, NULL) == ENOBUFS) {
				sc->dropped_pkts++;
				em_get_buf(i, sc, mp);
				if (sc->fmp != NULL)
					m_freem(sc->fmp);
				sc->fmp = NULL;
				sc->lmp = NULL;
				break;
			}

			/* Assign correct length to the current fragment */
			mp->m_len = len;

			/* fmp = head of the packet being assembled,
			 * lmp = its last fragment. */
			if (sc->fmp == NULL) {
				mp->m_pkthdr.len = len;
				sc->fmp = mp;	 /* Store the first mbuf */
				sc->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				sc->lmp->m_next = mp;
				sc->lmp = sc->lmp->m_next;
				sc->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				/* Complete packet: hand it to the stack. */
				sc->fmp->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;

#if NBPFILTER > 0
				/*
				 * Handle BPF listeners. Let the BPF
				 * user see the packet.
				 */
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, sc->fmp);
#endif

#ifdef __FreeBSD__
				eh = mtod(sc->fmp, struct ether_header *);
				/* Remove ethernet header from mbuf */
				m_adj(sc->fmp, sizeof(struct ether_header));
#endif
				em_receive_checksum(sc, current_desc,
						sc->fmp);

#ifdef __FreeBSD__
				if (current_desc->status & E1000_RXD_STAT_VP)
					VLAN_INPUT_TAG(eh, sc->fmp,
					    (letoh16(current_desc->special) &
					    E1000_RXD_SPC_VLAN_MASK));
				else
					ether_input(ifp, eh, sc->fmp);
#else /* __FreeBSD__ */
				ether_input_mbuf(ifp, sc->fmp);
#endif /* !__FreeBSD__ */

				sc->fmp = NULL;
				sc->lmp = NULL;
			}
		} else {
			/* Bad frame: recycle the mbuf into slot i and
			 * discard anything accumulated so far. */
			sc->dropped_pkts++;
			em_get_buf(i, sc, mp);
			if (sc->fmp != NULL)
				m_freem(sc->fmp);
			sc->fmp = NULL;
			sc->lmp = NULL;
		}

		/* Zero out the receive descriptors status  */
		current_desc->status = 0;

		/* Advance the E1000's Receive Queue #0	 "Tail Pointer". */
		E1000_WRITE_REG(&sc->hw, RDT, i);

		/* Advance our pointers to the next descriptor */
		if (++i == sc->num_rx_desc) {
			i = 0;
			current_desc = sc->rx_desc_base;
		} else
			current_desc++;
	}
	sc->next_rx_desc_to_check = i;
	return;
}
2689 
2690 /*********************************************************************
2691  *
2692  *  Verify that the hardware indicated that the checksum is valid.
2693  *  Inform the stack about the status of checksum so that stack
2694  *  doesn't spend time verifying the checksum.
2695  *
2696  *********************************************************************/
2697 void
2698 em_receive_checksum(struct em_softc *sc,
2699 		    struct em_rx_desc *rx_desc,
2700 		    struct mbuf *mp)
2701 {
2702 #ifdef __FreeBSD__
2703 	/* 82543 or newer only */
2704 	if ((sc->hw.mac_type < em_82543) ||
2705 	    /* Ignore Checksum bit is set */
2706 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2707 		mp->m_pkthdr.csum_flags = 0;
2708 		return;
2709 	}
2710 
2711 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2712 		/* Did it pass? */
2713 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2714 			/* IP Checksum Good */
2715 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2716 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2717 
2718 		} else {
2719 			mp->m_pkthdr.csum_flags = 0;
2720 		}
2721 	}
2722 
2723 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2724 		/* Did it pass? */
2725 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2726 			mp->m_pkthdr.csum_flags |=
2727 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2728 			mp->m_pkthdr.csum_data = htons(0xffff);
2729 		}
2730 	}
2731 
2732 	return;
2733 #else /* __FreeBSD__ */
2734 	/* 82543 or newer only */
2735 	if ((sc->hw.mac_type < em_82543) ||
2736 	    /* Ignore Checksum bit is set */
2737 	    (rx_desc->status & E1000_RXD_STAT_IXSM))
2738 		return;
2739 
2740 	if ((rx_desc->status & (E1000_RXD_STAT_IPCS|E1000_RXD_ERR_IPE)) ==
2741 	    E1000_RXD_STAT_IPCS)
2742 		mp->m_pkthdr.csum |= M_IPV4_CSUM_IN_OK;
2743 
2744 	if ((rx_desc->status & (E1000_RXD_STAT_IPCS|E1000_RXD_ERR_IPE|
2745 	    E1000_RXD_STAT_TCPCS|E1000_RXD_ERR_TCPE)) ==
2746 	    (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_IPCS))
2747 		mp->m_pkthdr.csum |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
2748 #endif /* __FreeBSD__ */
2749 }
2750 
2751 
2752 void em_enable_vlans(struct em_softc * sc)
2753 {
2754 	uint32_t ctrl;
2755 
2756 	E1000_WRITE_REG(&sc->hw, VET, ETHERTYPE_8021Q);
2757 
2758 	ctrl = E1000_READ_REG(&sc->hw, CTRL);
2759 	ctrl |= E1000_CTRL_VME;
2760 	E1000_WRITE_REG(&sc->hw, CTRL, ctrl);
2761 
2762 	return;
2763 }
2764 
2765 void
2766 em_enable_intr(struct em_softc* sc)
2767 {
2768 	E1000_WRITE_REG(&sc->hw, IMS, (IMS_ENABLE_MASK));
2769 	return;
2770 }
2771 
2772 void
2773 em_disable_intr(struct em_softc *sc)
2774 {
2775 	E1000_WRITE_REG(&sc->hw, IMC,
2776 			(0xffffffff & ~E1000_IMC_RXSEQ));
2777 	return;
2778 }
2779 
2780 int
2781 em_is_valid_ether_addr(u_int8_t *addr)
2782 {
2783 	const char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2784 
2785 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
2786 		return (FALSE);
2787 	}
2788 
2789 	return(TRUE);
2790 }
2791 
2792 void
2793 em_write_pci_cfg(struct em_hw *hw,
2794 		      uint32_t reg,
2795 		      uint16_t *value)
2796 {
2797 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
2798 	pci_chipset_tag_t pc = pa->pa_pc;
2799 	/* Should we do read/mask/write...?  16 vs 32 bit!!! */
2800 	pci_conf_write(pc, pa->pa_tag, reg, *value);
2801 }
2802 
2803 void
2804 em_read_pci_cfg(struct em_hw *hw, uint32_t reg,
2805 		     uint16_t *value)
2806 {
2807 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
2808 	pci_chipset_tag_t pc = pa->pa_pc;
2809 	*value = pci_conf_read(pc, pa->pa_tag, reg);
2810 	return;
2811 }
2812 
2813 void
2814 em_pci_set_mwi(struct em_hw *hw)
2815 {
2816 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
2817 	pci_chipset_tag_t pc = pa->pa_pc;
2818 	/* Should we do read/mask/write...?  16 vs 32 bit!!! */
2819 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
2820 		(hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE));
2821 
2822 }
2823 
2824 void
2825 em_pci_clear_mwi(struct em_hw *hw)
2826 {
2827 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
2828 	pci_chipset_tag_t pc = pa->pa_pc;
2829 	/* Should we do read/mask/write...?  16 vs 32 bit!!! */
2830 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
2831 		(hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE));
2832 
2833 }
2834 
2835 uint32_t
2836 em_io_read(struct em_hw *hw, uint32_t port)
2837 {
2838 	return bus_space_read_4(((struct em_osdep *)(hw)->back)->em_iobtag,
2839 		((struct em_osdep *)(hw)->back)->em_iobhandle, port);
2840 }
2841 
2842 void
2843 em_io_write(struct em_hw *hw, uint32_t port, uint32_t value)
2844 {
2845 	bus_space_write_4(((struct em_osdep *)(hw)->back)->em_iobtag,
2846 			((struct em_osdep *)(hw)->back)->em_iobhandle, port,
2847 			value);
2848 	return;
2849 }
2850 
2851 /**********************************************************************
2852  *
2853  *  Update the board statistics counters.
2854  *
2855  **********************************************************************/
void
em_update_stats_counters(struct em_softc *sc)
{
	struct ifnet   *ifp;

	/* Accumulate the hardware statistics registers into the softc.
	 * The registers are clear-on-read. */
	sc->stats.crcerrs += E1000_READ_REG(&sc->hw, CRCERRS);
	sc->stats.symerrs += E1000_READ_REG(&sc->hw, SYMERRS);
	sc->stats.mpc += E1000_READ_REG(&sc->hw, MPC);
	sc->stats.scc += E1000_READ_REG(&sc->hw, SCC);
	sc->stats.ecol += E1000_READ_REG(&sc->hw, ECOL);

	sc->stats.mcc += E1000_READ_REG(&sc->hw, MCC);
	sc->stats.latecol += E1000_READ_REG(&sc->hw, LATECOL);
	sc->stats.colc += E1000_READ_REG(&sc->hw, COLC);
	sc->stats.dc += E1000_READ_REG(&sc->hw, DC);
	sc->stats.sec += E1000_READ_REG(&sc->hw, SEC);
	sc->stats.rlec += E1000_READ_REG(&sc->hw, RLEC);
	sc->stats.xonrxc += E1000_READ_REG(&sc->hw, XONRXC);
	sc->stats.xontxc += E1000_READ_REG(&sc->hw, XONTXC);
	sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, XOFFRXC);
	sc->stats.xofftxc += E1000_READ_REG(&sc->hw, XOFFTXC);
	sc->stats.fcruc += E1000_READ_REG(&sc->hw, FCRUC);
	sc->stats.prc64 += E1000_READ_REG(&sc->hw, PRC64);
	sc->stats.prc127 += E1000_READ_REG(&sc->hw, PRC127);
	sc->stats.prc255 += E1000_READ_REG(&sc->hw, PRC255);
	sc->stats.prc511 += E1000_READ_REG(&sc->hw, PRC511);
	sc->stats.prc1023 += E1000_READ_REG(&sc->hw, PRC1023);
	sc->stats.prc1522 += E1000_READ_REG(&sc->hw, PRC1522);
	sc->stats.gprc += E1000_READ_REG(&sc->hw, GPRC);
	sc->stats.bprc += E1000_READ_REG(&sc->hw, BPRC);
	sc->stats.mprc += E1000_READ_REG(&sc->hw, MPRC);
	sc->stats.gptc += E1000_READ_REG(&sc->hw, GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	sc->stats.gorcl += E1000_READ_REG(&sc->hw, GORCL);
	sc->stats.gorch += E1000_READ_REG(&sc->hw, GORCH);
	sc->stats.gotcl += E1000_READ_REG(&sc->hw, GOTCL);
	sc->stats.gotch += E1000_READ_REG(&sc->hw, GOTCH);

	sc->stats.rnbc += E1000_READ_REG(&sc->hw, RNBC);
	sc->stats.ruc += E1000_READ_REG(&sc->hw, RUC);
	sc->stats.rfc += E1000_READ_REG(&sc->hw, RFC);
	sc->stats.roc += E1000_READ_REG(&sc->hw, ROC);
	sc->stats.rjc += E1000_READ_REG(&sc->hw, RJC);

	sc->stats.torl += E1000_READ_REG(&sc->hw, TORL);
	sc->stats.torh += E1000_READ_REG(&sc->hw, TORH);
	sc->stats.totl += E1000_READ_REG(&sc->hw, TOTL);
	sc->stats.toth += E1000_READ_REG(&sc->hw, TOTH);

	sc->stats.tpr += E1000_READ_REG(&sc->hw, TPR);
	sc->stats.tpt += E1000_READ_REG(&sc->hw, TPT);
	sc->stats.ptc64 += E1000_READ_REG(&sc->hw, PTC64);
	sc->stats.ptc127 += E1000_READ_REG(&sc->hw, PTC127);
	sc->stats.ptc255 += E1000_READ_REG(&sc->hw, PTC255);
	sc->stats.ptc511 += E1000_READ_REG(&sc->hw, PTC511);
	sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, PTC1023);
	sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, PTC1522);
	sc->stats.mptc += E1000_READ_REG(&sc->hw, MPTC);
	sc->stats.bptc += E1000_READ_REG(&sc->hw, BPTC);

	/* These counters only exist on 82543 and newer parts. */
	if (sc->hw.mac_type >= em_82543) {
		sc->stats.algnerrc +=
		E1000_READ_REG(&sc->hw, ALGNERRC);
		sc->stats.rxerrc +=
		E1000_READ_REG(&sc->hw, RXERRC);
		sc->stats.tncrs +=
		E1000_READ_REG(&sc->hw, TNCRS);
		sc->stats.cexterr +=
		E1000_READ_REG(&sc->hw, CEXTERR);
		sc->stats.tsctc +=
		E1000_READ_REG(&sc->hw, TSCTC);
		sc->stats.tsctfc +=
		E1000_READ_REG(&sc->hw, TSCTFC);
	}
	ifp = &sc->interface_data.ac_if;

	/* Fill out the OS statistics structure */
	ifp->if_ibytes = sc->stats.gorcl;
	ifp->if_obytes = sc->stats.gotcl;
	ifp->if_imcasts = sc->stats.mprc;
	ifp->if_collisions = sc->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	sc->dropped_pkts +
	sc->stats.rxerrc +
	sc->stats.crcerrs +
	sc->stats.algnerrc +
	sc->stats.rlec + sc->stats.rnbc +
	sc->stats.mpc + sc->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol;

}
2954 
2955 
2956 /**********************************************************************
2957  *
2958  *  This routine is called only when em_display_debug_stats is enabled.
2959  *  This routine provides a way to take a look at important statistics
2960  *  maintained by the driver and hardware.
2961  *
2962  **********************************************************************/
void
em_print_debug_info(struct em_softc *sc)
{
	/* Prefix every line with the device name (e.g. "em0"). */
	const char * const unit = sc->sc_dv.dv_xname;

#ifdef DBG_STATS
	printf("%s: Packets not Avail = %ld\n", unit,
	       sc->no_pkts_avail);
	printf("%s: CleanTxInterrupts = %ld\n", unit,
	       sc->clean_tx_interrupts);
#endif
	printf("%s: fifo workaround = %lld, fifo_reset = %lld\n", unit,
		(long long)sc->tx_fifo_wrk,
		(long long)sc->tx_fifo_reset);
	/* TDH/TDT are read live from the hardware, not from the softc. */
	printf("%s: hw tdh = %d, hw tdt = %d\n", unit,
		E1000_READ_REG(&sc->hw, TDH),
		E1000_READ_REG(&sc->hw, TDT));
	printf("%s: Num Tx Descriptors avail = %ld\n", unit,
	       sc->num_tx_desc_avail);
	printf("%s: Tx Descriptors not avail1 = %ld\n", unit,
	       sc->no_tx_desc_avail1);
	printf("%s: Tx Descriptors not avail2 = %ld\n", unit,
	       sc->no_tx_desc_avail2);
	printf("%s: Std mbuf failed = %ld\n", unit,
		sc->mbuf_alloc_failed);
	printf("%s: Std mbuf cluster failed = %ld\n", unit,
		sc->mbuf_cluster_failed);
	printf("%s: Driver dropped packets = %ld\n", unit,
	       sc->dropped_pkts);

	return;
}
2995 
void
em_print_hw_stats(struct em_softc *sc)
{
	/* Prefix every line with the device name (e.g. "em0"). */
	const char * const unit = sc->sc_dv.dv_xname;

	/* Dump the accumulated hardware counters kept in sc->stats. */
	printf("%s: Excessive collisions = %lld\n", unit,
		(long long)sc->stats.ecol);
	printf("%s: Symbol errors = %lld\n", unit,
	       (long long)sc->stats.symerrs);
	printf("%s: Sequence errors = %lld\n", unit,
	       (long long)sc->stats.sec);
	printf("%s: Defer count = %lld\n", unit,
	       (long long)sc->stats.dc);

	printf("%s: Missed Packets = %lld\n", unit,
	       (long long)sc->stats.mpc);
	printf("%s: Receive No Buffers = %lld\n", unit,
	       (long long)sc->stats.rnbc);
	printf("%s: Receive length errors = %lld\n", unit,
	       (long long)sc->stats.rlec);
	printf("%s: Receive errors = %lld\n", unit,
	       (long long)sc->stats.rxerrc);
	printf("%s: Crc errors = %lld\n", unit,
	       (long long)sc->stats.crcerrs);
	printf("%s: Alignment errors = %lld\n", unit,
	       (long long)sc->stats.algnerrc);
	printf("%s: Carrier extension errors = %lld\n", unit,
	       (long long)sc->stats.cexterr);

	printf("%s: XON Rcvd = %lld\n", unit,
	       (long long)sc->stats.xonrxc);
	printf("%s: XON Xmtd = %lld\n", unit,
	       (long long)sc->stats.xontxc);
	printf("%s: XOFF Rcvd = %lld\n", unit,
	       (long long)sc->stats.xoffrxc);
	printf("%s: XOFF Xmtd = %lld\n", unit,
	       (long long)sc->stats.xofftxc);

	printf("%s: Good Packets Rcvd = %lld\n", unit,
	       (long long)sc->stats.gprc);
	printf("%s: Good Packets Xmtd = %lld\n", unit,
	       (long long)sc->stats.gptc);

	return;
}
3041 
3042 #ifdef __FreeBSD__
3043 int
3044 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3045 {
3046 	int error;
3047 	int result;
3048 	struct em_softc *sc;
3049 
3050 	result = -1;
3051 	error = sysctl_handle_int(oidp, &result, 0, req);
3052 
3053 	if (error || !req->newptr)
3054 		return (error);
3055 
3056 	if (result == 1) {
3057 		sc = (struct em_softc *)arg1;
3058 		em_print_debug_info(sc);
3059 	}
3060 
3061 	return error;
3062 }
3063 
3064 
3065 int
3066 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3067 {
3068 	int error;
3069 	int result;
3070 	struct em_softc *sc;
3071 
3072 	result = -1;
3073 	error = sysctl_handle_int(oidp, &result, 0, req);
3074 
3075 	if (error || !req->newptr)
3076 		return (error);
3077 
3078 	if (result == 1) {
3079 		sc = (struct em_softc *)arg1;
3080 		em_print_hw_stats(sc);
3081 	}
3082 
3083 	return error;
3084 }
3085 #endif /* __FreeBSD__ */
3086 
3087