xref: /dflybsd-src/sys/dev/netif/em/if_em.c (revision 7aa7bdf5314aa7edbb815af98e97246e1ee6d889)
1 /**************************************************************************
2 
3 Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4 
5 Copyright (c) 2001-2003, Intel Corporation
6 All rights reserved.
7 
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10 
11  1. Redistributions of source code must retain the above copyright notice,
12     this list of conditions and the following disclaimer.
13 
14  2. Redistributions in binary form must reproduce the above copyright
15     notice, this list of conditions and the following disclaimer in the
16     documentation and/or other materials provided with the distribution.
17 
18  3. Neither the name of the Intel Corporation nor the names of its
19     contributors may be used to endorse or promote products derived from
20     this software without specific prior written permission.
21 
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33 
34 ***************************************************************************/
35 
36 /*$FreeBSD: src/sys/dev/em/if_em.c,v 1.2.2.15 2003/06/09 22:10:15 pdeuskar Exp $*/
37 /*$DragonFly: src/sys/dev/netif/em/if_em.c,v 1.26 2005/02/05 23:23:25 joerg Exp $*/
38 
39 #include "if_em.h"
40 
41 /*********************************************************************
42  *  Set this to one to display debug statistics
43  *********************************************************************/
44 int             em_display_debug_stats = 0;
45 
46 /*********************************************************************
47  *  Driver version
48  *********************************************************************/
49 
50 char em_driver_version[] = "1.7.25";
51 
52 
53 /*********************************************************************
54  *  PCI Device ID Table
55  *
56  *  Used by probe to select the devices this driver attaches to
57  *  Last field stores an index into em_strings
58  *  Last entry must be all 0s
59  *
60  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61  *********************************************************************/
62 
63 static em_vendor_info_t em_vendor_info_array[] =
64 {
65 	/* Intel(R) PRO/1000 Network Connection */
66 	{ 0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0},
67 	{ 0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0},
68 	{ 0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0},
69 	{ 0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0},
70 	{ 0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0},
71 	{ 0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0},
72 	{ 0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0},
73 	{ 0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0},
74 	{ 0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0},
75 	{ 0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0},
76 	{ 0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0},
77 	{ 0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0},
78 	{ 0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0},
79 	{ 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
80 	{ 0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0},
81 	{ 0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0},
82 	{ 0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0},
83 	{ 0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0},
84 	{ 0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0},
85 	{ 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
86 	{ 0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0},
87 	{ 0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0},
88 	{ 0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0},
89 	{ 0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0},
90 	{ 0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0},
91 	{ 0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0},
92 	{ 0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0},
93 	{ 0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0},
94 	{ 0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0},
95 	{ 0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0},
96 	{ 0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0},
97 	{ 0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0},
98 	/* required last entry */
99 	{ 0, 0, 0, 0, 0}
100 };
101 
102 /*********************************************************************
103  *  Table of branding strings for all supported NICs.
104  *********************************************************************/
105 
106 static const char *em_strings[] = {
107 	"Intel(R) PRO/1000 Network Connection"
108 };
109 
110 /*********************************************************************
111  *  Function prototypes
112  *********************************************************************/
113 static int	em_probe(device_t);
114 static int	em_attach(device_t);
115 static int	em_detach(device_t);
116 static int	em_shutdown(device_t);
117 static void	em_intr(void *);
118 static void	em_start(struct ifnet *);
119 static int	em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
120 static void	em_watchdog(struct ifnet *);
121 static void	em_init(void *);
122 static void	em_stop(void *);
123 static void	em_media_status(struct ifnet *, struct ifmediareq *);
124 static int	em_media_change(struct ifnet *);
125 static void	em_identify_hardware(struct adapter *);
126 static void	em_local_timer(void *);
127 static int	em_hardware_init(struct adapter *);
128 static void	em_setup_interface(device_t, struct adapter *);
129 static int	em_setup_transmit_structures(struct adapter *);
130 static void	em_initialize_transmit_unit(struct adapter *);
131 static int	em_setup_receive_structures(struct adapter *);
132 static void	em_initialize_receive_unit(struct adapter *);
133 static void	em_enable_intr(struct adapter *);
134 static void	em_disable_intr(struct adapter *);
135 static void	em_free_transmit_structures(struct adapter *);
136 static void	em_free_receive_structures(struct adapter *);
137 static void	em_update_stats_counters(struct adapter *);
138 static void	em_clean_transmit_interrupts(struct adapter *);
139 static int	em_allocate_receive_structures(struct adapter *);
140 static int	em_allocate_transmit_structures(struct adapter *);
141 static void	em_process_receive_interrupts(struct adapter *, int);
142 static void	em_receive_checksum(struct adapter *, struct em_rx_desc *,
143 				    struct mbuf *);
144 static void	em_transmit_checksum_setup(struct adapter *, struct mbuf *,
145 					   uint32_t *, uint32_t *);
146 static void	em_set_promisc(struct adapter *);
147 static void	em_disable_promisc(struct adapter *);
148 static void	em_set_multi(struct adapter *);
149 static void	em_print_hw_stats(struct adapter *);
150 static void	em_print_link_status(struct adapter *);
151 static int	em_get_buf(int i, struct adapter *, struct mbuf *, int how);
152 static void	em_enable_vlans(struct adapter *);
153 static int	em_encap(struct adapter *, struct mbuf *);
154 static void	em_smartspeed(struct adapter *);
155 static int	em_82547_fifo_workaround(struct adapter *, int);
156 static void	em_82547_update_fifo_head(struct adapter *, int);
157 static int	em_82547_tx_fifo_reset(struct adapter *);
158 static void	em_82547_move_tail(void *arg);
159 static int	em_dma_malloc(struct adapter *, bus_size_t,
160 			      struct em_dma_alloc *, int);
161 static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
162 static void	em_print_debug_info(struct adapter *);
163 static int	em_is_valid_ether_addr(uint8_t *);
164 static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
165 static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
166 static uint32_t	em_fill_descriptors(uint64_t address, uint32_t length,
167 				   PDESC_ARRAY desc_array);
168 static int	em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
169 static int	em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
170 static void	em_add_int_delay_sysctl(struct adapter *, const char *,
171 					const char *,
172 					struct em_int_delay_info *, int, int);
173 
174 /*********************************************************************
175  *  FreeBSD Device Interface Entry Points
176  *********************************************************************/
177 
178 static device_method_t em_methods[] = {
179 	/* Device interface */
180 	DEVMETHOD(device_probe, em_probe),
181 	DEVMETHOD(device_attach, em_attach),
182 	DEVMETHOD(device_detach, em_detach),
183 	DEVMETHOD(device_shutdown, em_shutdown),
184 	{0, 0}
185 };
186 
187 static driver_t em_driver = {
188 	"em", em_methods, sizeof(struct adapter),
189 };
190 
191 static devclass_t em_devclass;
192 
193 DECLARE_DUMMY_MODULE(if_em);
194 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
195 
196 /*********************************************************************
197  *  Tunable default values.
198  *********************************************************************/
199 
200 #define E1000_TICKS_TO_USECS(ticks)	((1024 * (ticks) + 500) / 1000)
201 #define E1000_USECS_TO_TICKS(usecs)	((1000 * (usecs) + 512) / 1024)
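/*
 * Worked example (illustrative): the interrupt delay registers count in
 * ~1.024us hardware ticks, so E1000_USECS_TO_TICKS(128) =
 * (1000 * 128 + 512) / 1024 = 125 ticks, and E1000_TICKS_TO_USECS(125) =
 * (1024 * 125 + 500) / 1000 rounds back to 128 usecs.  The +512 and +500
 * terms provide round-to-nearest behaviour.
 */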
202 
203 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
204 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
205 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
206 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
207 static int em_int_throttle_ceil = 10000;
208 
209 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
210 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
211 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
212 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
213 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
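/*
 * Usage sketch (assuming the standard loader(8) tunable mechanism, which is
 * not shown in this file): TUNABLE_INT() picks these values up from the
 * kernel environment at load time, so the defaults can be overridden at
 * boot in /boot/loader.conf, e.g.:
 *
 *	hw.em.rx_int_delay="32"
 *	hw.em.int_throttle_ceil="8000"
 */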
214 
215 /*********************************************************************
216  *  Device identification routine
217  *
218  *  em_probe determines whether the driver should be loaded for a
219  *  given adapter, based on its PCI vendor and device IDs.
220  *
221  *  return 0 on success, positive on failure
222  *********************************************************************/
223 
224 static int
225 em_probe(device_t dev)
226 {
227 	em_vendor_info_t *ent;
228 
229 	uint16_t pci_vendor_id = 0;
230 	uint16_t pci_device_id = 0;
231 	uint16_t pci_subvendor_id = 0;
232 	uint16_t pci_subdevice_id = 0;
233 	char adapter_name[60];
234 
235 	INIT_DEBUGOUT("em_probe: begin");
236 
237 	pci_vendor_id = pci_get_vendor(dev);
238 	if (pci_vendor_id != EM_VENDOR_ID)
239 		return(ENXIO);
240 
241 	pci_device_id = pci_get_device(dev);
242 	pci_subvendor_id = pci_get_subvendor(dev);
243 	pci_subdevice_id = pci_get_subdevice(dev);
244 
245 	ent = em_vendor_info_array;
246 	while (ent->vendor_id != 0) {
247 		if ((pci_vendor_id == ent->vendor_id) &&
248 		    (pci_device_id == ent->device_id) &&
249 
250 		    ((pci_subvendor_id == ent->subvendor_id) ||
251 		     (ent->subvendor_id == PCI_ANY_ID)) &&
252 
253 		    ((pci_subdevice_id == ent->subdevice_id) ||
254 		     (ent->subdevice_id == PCI_ANY_ID))) {
255 			snprintf(adapter_name, sizeof(adapter_name),
256 				 "%s, Version - %s",  em_strings[ent->index],
257 				 em_driver_version);
258 			device_set_desc_copy(dev, adapter_name);
259 			return(0);
260 		}
261 		ent++;
262 	}
263 
264 	return(ENXIO);
265 }
266 
267 /*********************************************************************
268  *  Device initialization routine
269  *
270  *  The attach entry point is called when the driver is being loaded.
271  *  This routine identifies the type of hardware, allocates all resources
272  *  and initializes the hardware.
273  *
274  *  return 0 on success, positive on failure
275  *********************************************************************/
276 
277 static int
278 em_attach(device_t dev)
279 {
280 	struct adapter *adapter;
281 	int tsize, rsize;
282 	int i, val, rid;
283 	int error = 0;
284 
285 	INIT_DEBUGOUT("em_attach: begin");
286 
287 	adapter = device_get_softc(dev);
288 
289 	bzero(adapter, sizeof(struct adapter));
290 
291 	callout_init(&adapter->timer);
292 	callout_init(&adapter->tx_fifo_timer);
293 
294 	adapter->dev = dev;
295 	adapter->osdep.dev = dev;
296 
297 	/* SYSCTL stuff */
298 	sysctl_ctx_init(&adapter->sysctl_ctx);
299 	adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
300 					       SYSCTL_STATIC_CHILDREN(_hw),
301 					       OID_AUTO,
302 					       device_get_nameunit(dev),
303 					       CTLFLAG_RD,
304 					       0, "");
305 
306 	if (adapter->sysctl_tree == NULL) {
307 		error = EIO;
308 		goto fail;
309 	}
310 
311 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
312 			SYSCTL_CHILDREN(adapter->sysctl_tree),
313 			OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
314 			(void *)adapter, 0,
315 			em_sysctl_debug_info, "I", "Debug Information");
316 
317 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
318 			SYSCTL_CHILDREN(adapter->sysctl_tree),
319 			OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
320 			(void *)adapter, 0,
321 			em_sysctl_stats, "I", "Statistics");
322 
323 	/* Determine hardware revision */
324 	em_identify_hardware(adapter);
325 
326 	/* Set up some sysctls for the tunable interrupt delays */
327 	em_add_int_delay_sysctl(adapter, "rx_int_delay",
328 				"receive interrupt delay in usecs",
329 				&adapter->rx_int_delay,
330 				E1000_REG_OFFSET(&adapter->hw, RDTR),
331 				em_rx_int_delay_dflt);
332 	em_add_int_delay_sysctl(adapter, "tx_int_delay",
333 				"transmit interrupt delay in usecs",
334 				&adapter->tx_int_delay,
335 				E1000_REG_OFFSET(&adapter->hw, TIDV),
336 				em_tx_int_delay_dflt);
337 	if (adapter->hw.mac_type >= em_82540) {
338 		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
339 					"receive interrupt delay limit in usecs",
340 					&adapter->rx_abs_int_delay,
341 					E1000_REG_OFFSET(&adapter->hw, RADV),
342 					em_rx_abs_int_delay_dflt);
343 		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
344 					"transmit interrupt delay limit in usecs",
345 					&adapter->tx_abs_int_delay,
346 					E1000_REG_OFFSET(&adapter->hw, TADV),
347 					em_tx_abs_int_delay_dflt);
348 		SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
349 			SYSCTL_CHILDREN(adapter->sysctl_tree),
350 			OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
351 			adapter, 0, em_sysctl_int_throttle, "I", NULL);
352 	}
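	/*
	 * Example (unit name is illustrative): since the tree is rooted at
	 * hw.<nameunit>, the knobs created above show up for unit 0 as
	 * hw.em0.debug_info, hw.em0.stats, hw.em0.rx_int_delay and, on
	 * 82540 and later parts, hw.em0.int_throttle_ceil, all adjustable
	 * at runtime via sysctl(8).
	 */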
353 
354 	/* Parameters (to be read from user) */
355 	adapter->num_tx_desc = EM_MAX_TXD;
356 	adapter->num_rx_desc = EM_MAX_RXD;
357 	adapter->hw.autoneg = DO_AUTO_NEG;
358 	adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
359 	adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
360 	adapter->hw.tbi_compatibility_en = TRUE;
361 	adapter->rx_buffer_len = EM_RXBUFFER_2048;
362 
363 	/*
364 	 * These parameters control the automatic generation(Tx) and
365 	 * response(Rx) to Ethernet PAUSE frames.
366 	 */
367 	adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
368 	adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
369 	adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
370 	adapter->hw.fc_send_xon   = TRUE;
371 	adapter->hw.fc = em_fc_full;
372 
373 	adapter->hw.phy_init_script = 1;
374 	adapter->hw.phy_reset_disable = FALSE;
375 
376 #ifndef EM_MASTER_SLAVE
377 	adapter->hw.master_slave = em_ms_hw_default;
378 #else
379 	adapter->hw.master_slave = EM_MASTER_SLAVE;
380 #endif
381 
382 	/*
383 	 * Set the max frame size assuming standard ethernet
384 	 * sized frames
385 	 */
386 	adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
387 
388 	adapter->hw.min_frame_size =
389 	    MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
390 
391 	/*
392 	 * This controls when hardware reports transmit completion
393 	 * status.
394 	 */
395 	adapter->hw.report_tx_early = 1;
396 
397 	rid = EM_MMBA;
398 	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
399 						     &rid, RF_ACTIVE);
400 	if (!(adapter->res_memory)) {
401 		device_printf(dev, "Unable to allocate bus resource: memory\n");
402 		error = ENXIO;
403 		goto fail;
404 	}
405 	adapter->osdep.mem_bus_space_tag =
406 	    rman_get_bustag(adapter->res_memory);
407 	adapter->osdep.mem_bus_space_handle =
408 	    rman_get_bushandle(adapter->res_memory);
409 	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
410 
411 	if (adapter->hw.mac_type > em_82543) {
412 		/* Figure out where our I/O BAR is */
413 		rid = EM_MMBA;
414 		for (i = 0; i < 5; i++) {
415 			val = pci_read_config(dev, rid, 4);
416 			if (val & 0x00000001) {
417 				adapter->io_rid = rid;
418 				break;
419 			}
420 			rid += 4;
421 		}
422 
423 		adapter->res_ioport = bus_alloc_resource_any(dev,
424 		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
425 		if (!(adapter->res_ioport)) {
426 			device_printf(dev, "Unable to allocate bus resource: ioport\n");
427 			error = ENXIO;
428 			goto fail;
429 		}
430 
431 		adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
432 		adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
433 	}
434 
435 	rid = 0x0;
436 	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
437 	    &rid, RF_SHAREABLE | RF_ACTIVE);
438 	if (!(adapter->res_interrupt)) {
439 		device_printf(dev, "Unable to allocate bus resource: interrupt\n");
440 		error = ENXIO;
441 		goto fail;
442 	}
443 
444 	adapter->hw.back = &adapter->osdep;
445 
446 	/* Initialize eeprom parameters */
447 	em_init_eeprom_params(&adapter->hw);
448 
449 	tsize = adapter->num_tx_desc * sizeof(struct em_tx_desc);
450 
451 	/* Allocate Transmit Descriptor ring */
452 	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
453 		device_printf(dev, "Unable to allocate TxDescriptor memory\n");
454 		error = ENOMEM;
455 		goto fail;
456 	}
457 	adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
458 
459 	rsize = adapter->num_rx_desc * sizeof(struct em_rx_desc);
460 
461 	/* Allocate Receive Descriptor ring */
462 	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
463 		device_printf(dev, "Unable to allocate rx_desc memory\n");
464 		error = ENOMEM;
465 		goto fail;
466 	}
467 	adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
468 
469 	/* Initialize the hardware */
470 	if (em_hardware_init(adapter)) {
471 		device_printf(dev, "Unable to initialize the hardware\n");
472 		error = EIO;
473 		goto fail;
474 	}
475 
476 	/* Copy the permanent MAC address out of the EEPROM */
477 	if (em_read_mac_addr(&adapter->hw) < 0) {
478 		device_printf(dev, "EEPROM read error while reading mac address\n");
479 		error = EIO;
480 		goto fail;
481 	}
482 
483 	if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
484 		device_printf(dev, "Invalid mac address\n");
485 		error = EIO;
486 		goto fail;
487 	}
488 
489 	/* Setup OS specific network interface */
490 	em_setup_interface(dev, adapter);
491 
492 	/* Initialize statistics */
493 	em_clear_hw_cntrs(&adapter->hw);
494 	em_update_stats_counters(adapter);
495 	adapter->hw.get_link_status = 1;
496 	em_check_for_link(&adapter->hw);
497 
498 	/* Print the link status */
499 	if (adapter->link_active == 1) {
500 		em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
501 					&adapter->link_duplex);
502 		device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
503 		    adapter->link_speed,
504 		    adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
505 	} else
506 		device_printf(dev, "Speed: N/A, Duplex:N/A\n");
507 
508 	/* Identify 82544 on PCIX */
509 	em_get_bus_info(&adapter->hw);
510 	if (adapter->hw.bus_type == em_bus_type_pcix &&
511 	    adapter->hw.mac_type == em_82544)
512 		adapter->pcix_82544 = TRUE;
513 	else
514 		adapter->pcix_82544 = FALSE;
515 
516 	error = bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_NET,
517 			   (void (*)(void *)) em_intr, adapter,
518 			   &adapter->int_handler_tag);
519 	if (error) {
520 		device_printf(dev, "Error registering interrupt handler!\n");
521 		ether_ifdetach(&adapter->interface_data.ac_if);
522 		goto fail;
523 	}
524 
525 	INIT_DEBUGOUT("em_attach: end");
526 	return(0);
527 
528 fail:
529 	em_detach(dev);
530 	return(error);
531 }
532 
533 /*********************************************************************
534  *  Device removal routine
535  *
536  *  The detach entry point is called when the driver is being removed.
537  *  This routine stops the adapter and deallocates all the resources
538  *  that were allocated for driver operation.
539  *
540  *  return 0 on success, positive on failure
541  *********************************************************************/
542 
543 static int
544 em_detach(device_t dev)
545 {
546 	struct adapter * adapter = device_get_softc(dev);
547 	int s;
548 
549 	INIT_DEBUGOUT("em_detach: begin");
550 	s = splimp();
551 
552 	adapter->in_detach = 1;
553 
554 	if (device_is_attached(dev)) {
555 		em_stop(adapter);
556 		em_phy_hw_reset(&adapter->hw);
557 		ether_ifdetach(&adapter->interface_data.ac_if);
558 	}
559 	bus_generic_detach(dev);
560 
561 	if (adapter->res_interrupt != NULL) {
562 		bus_teardown_intr(dev, adapter->res_interrupt,
563 				  adapter->int_handler_tag);
564 		bus_release_resource(dev, SYS_RES_IRQ, 0,
565 				     adapter->res_interrupt);
566 	}
567 	if (adapter->res_memory != NULL) {
568 		bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
569 				     adapter->res_memory);
570 	}
571 
572 	if (adapter->res_ioport != NULL) {
573 		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
574 				     adapter->res_ioport);
575 	}
576 
577 	/* Free Transmit Descriptor ring */
578 	if (adapter->tx_desc_base != NULL) {
579 		em_dma_free(adapter, &adapter->txdma);
580 		adapter->tx_desc_base = NULL;
581 	}
582 
583 	/* Free Receive Descriptor ring */
584 	if (adapter->rx_desc_base != NULL) {
585 		em_dma_free(adapter, &adapter->rxdma);
586 		adapter->rx_desc_base = NULL;
587 	}
588 
589 	adapter->sysctl_tree = NULL;
590 	sysctl_ctx_free(&adapter->sysctl_ctx);
591 
592 	splx(s);
593 	return(0);
594 }
595 
596 /*********************************************************************
597  *
598  *  Shutdown entry point
599  *
600  **********************************************************************/
601 
602 static int
603 em_shutdown(device_t dev)
604 {
605 	struct adapter *adapter = device_get_softc(dev);
606 	em_stop(adapter);
607 	return(0);
608 }
609 
610 /*********************************************************************
611  *  Transmit entry point
612  *
613  *  em_start is called by the stack to initiate a transmit.
614  *  The driver will remain in this routine as long as there are
615  *  packets to transmit and transmit resources are available.
616  *  If transmit resources are not available, the stack is notified
617  *  and the packet is requeued.
618  **********************************************************************/
619 
620 static void
621 em_start(struct ifnet *ifp)
622 {
623 	int s;
624 	struct mbuf *m_head;
625 	struct adapter *adapter = ifp->if_softc;
626 
627 	if (!adapter->link_active)
628 		return;
629 
630 	s = splimp();
631 	while (ifp->if_snd.ifq_head != NULL) {
632 		IF_DEQUEUE(&ifp->if_snd, m_head);
633 
634 		if (m_head == NULL)
635 			break;
636 
637 		if (em_encap(adapter, m_head)) {
638 			ifp->if_flags |= IFF_OACTIVE;
639 			IF_PREPEND(&ifp->if_snd, m_head);
640 			break;
641 		}
642 
643 		/* Send a copy of the frame to the BPF listener */
644 		BPF_MTAP(ifp, m_head);
645 
646 		/* Set timeout in case hardware has problems transmitting */
647 		ifp->if_timer = EM_TX_TIMEOUT;
648 	}
649 	splx(s);
650 }
651 
652 /*********************************************************************
653  *  Ioctl entry point
654  *
655  *  em_ioctl is called when the user wants to configure the
656  *  interface.
657  *
658  *  return 0 on success, positive on failure
659  **********************************************************************/
660 
661 static int
662 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
663 {
664 	int s, mask, error = 0;
665 	struct ifreq *ifr = (struct ifreq *) data;
666 	struct adapter *adapter = ifp->if_softc;
667 
668 	s = splimp();
669 
670 	if (adapter->in_detach)
671 		goto out;
672 
673 	switch (command) {
674 	case SIOCSIFADDR:
675 	case SIOCGIFADDR:
676 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
677 		ether_ioctl(ifp, command, data);
678 		break;
679 	case SIOCSIFMTU:
680 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
681 		if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
682 			error = EINVAL;
683 		} else {
684 			ifp->if_mtu = ifr->ifr_mtu;
685 			adapter->hw.max_frame_size =
686 			ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
687 			em_init(adapter);
688 		}
689 		break;
690 	case SIOCSIFFLAGS:
691 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
692 		if (ifp->if_flags & IFF_UP) {
693 			if (!(ifp->if_flags & IFF_RUNNING))
694 				em_init(adapter);
695 			em_disable_promisc(adapter);
696 			em_set_promisc(adapter);
697 		} else {
698 			if (ifp->if_flags & IFF_RUNNING)
699 				em_stop(adapter);
700 		}
701 		break;
702 	case SIOCADDMULTI:
703 	case SIOCDELMULTI:
704 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
705 		if (ifp->if_flags & IFF_RUNNING) {
706 			em_disable_intr(adapter);
707 			em_set_multi(adapter);
708 			if (adapter->hw.mac_type == em_82542_rev2_0)
709 				em_initialize_receive_unit(adapter);
710 #ifdef DEVICE_POLLING
711 			if (!(ifp->if_flags & IFF_POLLING))
712 #endif
713 				em_enable_intr(adapter);
714 		}
715 		break;
716 	case SIOCSIFMEDIA:
717 	case SIOCGIFMEDIA:
718 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
719 		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
720 		break;
721 	case SIOCSIFCAP:
722 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
723 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
724 		if (mask & IFCAP_HWCSUM) {
725 			if (IFCAP_HWCSUM & ifp->if_capenable)
726 				ifp->if_capenable &= ~IFCAP_HWCSUM;
727 			else
728 				ifp->if_capenable |= IFCAP_HWCSUM;
729 			if (ifp->if_flags & IFF_RUNNING)
730 				em_init(adapter);
731 		}
732 		break;
733 	default:
734 		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)\n", (int)command);
735 		error = EINVAL;
736 	}
737 
738 out:
739 	splx(s);
740 	return(error);
741 }
742 
743 /*********************************************************************
744  *  Watchdog entry point
745  *
746  *  This routine is called whenever hardware quits transmitting.
747  *
748  **********************************************************************/
749 
750 static void
751 em_watchdog(struct ifnet *ifp)
752 {
753 	struct adapter * adapter;
754 	adapter = ifp->if_softc;
755 
756 	/* If we are in this routine because of pause frames, then
757 	 * don't reset the hardware.
758 	 */
759 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
760 		ifp->if_timer = EM_TX_TIMEOUT;
761 		return;
762 	}
763 
764 	if (em_check_for_link(&adapter->hw))
765 		if_printf(ifp, "watchdog timeout -- resetting\n");
766 
767 	ifp->if_flags &= ~IFF_RUNNING;
768 
769 	em_init(adapter);
770 
771 	ifp->if_oerrors++;
772 }
773 
774 /*********************************************************************
775  *  Init entry point
776  *
777  *  This routine is used in two ways. It is used by the stack as
778  *  init entry point in network interface structure. It is also used
779  *  by the driver as a hw/sw initialization routine to get to a
780  *  consistent state.
781  *
782  *  return 0 on success, positive on failure
783  **********************************************************************/
784 
785 static void
786 em_init(void *arg)
787 {
788 	int s;
789 	struct adapter *adapter = arg;
790 	struct ifnet *ifp = &adapter->interface_data.ac_if;
791 
792 	INIT_DEBUGOUT("em_init: begin");
793 
794 	s = splimp();
795 
796 	em_stop(adapter);
797 
798 	/* Get the latest mac address, User can use a LAA */
799 	bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
800 	      ETHER_ADDR_LEN);
801 
802 	/* Initialize the hardware */
803 	if (em_hardware_init(adapter)) {
804 		if_printf(ifp, "Unable to initialize the hardware\n");
805 		splx(s);
806 		return;
807 	}
808 
809 	em_enable_vlans(adapter);
810 
811 	/* Prepare transmit descriptors and buffers */
812 	if (em_setup_transmit_structures(adapter)) {
813 		if_printf(ifp, "Could not setup transmit structures\n");
814 		em_stop(adapter);
815 		splx(s);
816 		return;
817 	}
818 	em_initialize_transmit_unit(adapter);
819 
820 	/* Setup Multicast table */
821 	em_set_multi(adapter);
822 
823 	/* Prepare receive descriptors and buffers */
824 	if (em_setup_receive_structures(adapter)) {
825 		if_printf(ifp, "Could not setup receive structures\n");
826 		em_stop(adapter);
827 		splx(s);
828 		return;
829 	}
830 	em_initialize_receive_unit(adapter);
831 
832 	/* Don't lose promiscuous settings */
833 	em_set_promisc(adapter);
834 
835 	ifp->if_flags |= IFF_RUNNING;
836 	ifp->if_flags &= ~IFF_OACTIVE;
837 
838 	if (adapter->hw.mac_type >= em_82543) {
839 		if (ifp->if_capenable & IFCAP_TXCSUM)
840 			ifp->if_hwassist = EM_CHECKSUM_FEATURES;
841 		else
842 			ifp->if_hwassist = 0;
843 	}
844 
845 	callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
846 	em_clear_hw_cntrs(&adapter->hw);
847 #ifdef DEVICE_POLLING
848 	/*
849 	 * Only enable interrupts if we are not polling; make sure
850 	 * they are off otherwise.
851 	 */
852 	if (ifp->if_flags & IFF_POLLING)
853 		em_disable_intr(adapter);
854 	else
855 #endif /* DEVICE_POLLING */
856 		em_enable_intr(adapter);
857 
858 	/* Don't reset the phy next time init gets called */
859 	adapter->hw.phy_reset_disable = TRUE;
860 
861 	splx(s);
862 }
863 
864 #ifdef DEVICE_POLLING
865 static poll_handler_t em_poll;
866 
867 static void
868 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
869 {
870 	struct adapter *adapter = ifp->if_softc;
871 	uint32_t reg_icr;
872 
873 	if (cmd == POLL_DEREGISTER) {       /* final call, enable interrupts */
874 		em_enable_intr(adapter);
875 		return;
876 	}
877 	if (cmd == POLL_AND_CHECK_STATUS) {
878 		reg_icr = E1000_READ_REG(&adapter->hw, ICR);
879 		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
880 			callout_stop(&adapter->timer);
881 			adapter->hw.get_link_status = 1;
882 			em_check_for_link(&adapter->hw);
883 			em_print_link_status(adapter);
884 			callout_reset(&adapter->timer, 2*hz, em_local_timer,
885 				      adapter);
886 		}
887 	}
888 	if (ifp->if_flags & IFF_RUNNING) {
889 		em_process_receive_interrupts(adapter, count);
890 		em_clean_transmit_interrupts(adapter);
891 	}
892 
893 	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
894 		em_start(ifp);
895 }
896 #endif /* DEVICE_POLLING */
897 
898 /*********************************************************************
899  *
900  *  Interrupt Service routine
901  *
902  **********************************************************************/
903 static void
904 em_intr(void *arg)
905 {
906 	uint32_t reg_icr;
907 	struct ifnet *ifp;
908 	struct adapter *adapter = arg;
909 
910 	ifp = &adapter->interface_data.ac_if;
911 
912 #ifdef DEVICE_POLLING
913 	if (ifp->if_flags & IFF_POLLING)
914 		return;
915 
916 	if (ether_poll_register(em_poll, ifp)) {
917 		em_disable_intr(adapter);
918 		em_poll(ifp, 0, 1);
919 		return;
920 	}
921 #endif /* DEVICE_POLLING */
922 
923 	reg_icr = E1000_READ_REG(&adapter->hw, ICR);
924 	if (!reg_icr)
925 		return;
926 
927 	/* Link status change */
928 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
929 		callout_stop(&adapter->timer);
930 		adapter->hw.get_link_status = 1;
931 		em_check_for_link(&adapter->hw);
932 		em_print_link_status(adapter);
933 		callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
934 	}
935 
936 	/*
937 	 * note: do not attempt to improve efficiency by looping.  This
938 	 * only results in unnecessary piecemeal collection of received
939 	 * packets and unnecessary piecemeal cleanups of the transmit ring.
940 	 */
941 	if (ifp->if_flags & IFF_RUNNING) {
942 		em_process_receive_interrupts(adapter, -1);
943 		em_clean_transmit_interrupts(adapter);
944 	}
945 
946 	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
947 		em_start(ifp);
948 }
949 
950 /*********************************************************************
951  *
952  *  Media Ioctl callback
953  *
954  *  This routine is called whenever the user queries the status of
955  *  the interface using ifconfig.
956  *
957  **********************************************************************/
958 static void
959 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
960 {
961 	struct adapter * adapter = ifp->if_softc;
962 
963 	INIT_DEBUGOUT("em_media_status: begin");
964 
965 	em_check_for_link(&adapter->hw);
966 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
967 		if (adapter->link_active == 0) {
968 			em_get_speed_and_duplex(&adapter->hw,
969 						&adapter->link_speed,
970 						&adapter->link_duplex);
971 			adapter->link_active = 1;
972 		}
973 	} else {
974 		if (adapter->link_active == 1) {
975 			adapter->link_speed = 0;
976 			adapter->link_duplex = 0;
977 			adapter->link_active = 0;
978 		}
979 	}
980 
981 	ifmr->ifm_status = IFM_AVALID;
982 	ifmr->ifm_active = IFM_ETHER;
983 
984 	if (!adapter->link_active)
985 		return;
986 
987 	ifmr->ifm_status |= IFM_ACTIVE;
988 
989 	if (adapter->hw.media_type == em_media_type_fiber) {
990 		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
991 	} else {
992 		switch (adapter->link_speed) {
993 		case 10:
994 			ifmr->ifm_active |= IFM_10_T;
995 			break;
996 		case 100:
997 			ifmr->ifm_active |= IFM_100_TX;
998 			break;
999 		case 1000:
1000 			ifmr->ifm_active |= IFM_1000_TX;
1001 			break;
1002 		}
1003 		if (adapter->link_duplex == FULL_DUPLEX)
1004 			ifmr->ifm_active |= IFM_FDX;
1005 		else
1006 			ifmr->ifm_active |= IFM_HDX;
1007 	}
1008 }
1009 
1010 /*********************************************************************
1011  *
1012  *  Media Ioctl callback
1013  *
1014  *  This routine is called when the user changes speed/duplex using
1015  *  the media/mediaopt options with ifconfig.
1016  *
1017  **********************************************************************/
1018 static int
1019 em_media_change(struct ifnet *ifp)
1020 {
1021 	struct adapter * adapter = ifp->if_softc;
1022 	struct ifmedia  *ifm = &adapter->media;
1023 
1024 	INIT_DEBUGOUT("em_media_change: begin");
1025 
1026 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1027 		return(EINVAL);
1028 
1029 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1030 	case IFM_AUTO:
1031 		adapter->hw.autoneg = DO_AUTO_NEG;
1032 		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1033 		break;
1034 	case IFM_1000_SX:
1035 	case IFM_1000_TX:
1036 		adapter->hw.autoneg = DO_AUTO_NEG;
1037 		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1038 		break;
1039 	case IFM_100_TX:
1040 		adapter->hw.autoneg = FALSE;
1041 		adapter->hw.autoneg_advertised = 0;
1042 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1043 			adapter->hw.forced_speed_duplex = em_100_full;
1044 		else
1045 			adapter->hw.forced_speed_duplex	= em_100_half;
1046 		break;
1047 	case IFM_10_T:
1048 		adapter->hw.autoneg = FALSE;
1049 		adapter->hw.autoneg_advertised = 0;
1050 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1051 			adapter->hw.forced_speed_duplex = em_10_full;
1052 		else
1053 			adapter->hw.forced_speed_duplex	= em_10_half;
1054 		break;
1055 	default:
1056 		if_printf(ifp, "Unsupported media type\n");
1057 	}
1058 	/*
1059 	 * As the speed/duplex settings may have changed we need to
1060 	 * reset the PHY.
1061 	 */
1062 	adapter->hw.phy_reset_disable = FALSE;
1063 
1064 	em_init(adapter);
1065 
1066 	return(0);
1067 }
1068 
1069 static void
1070 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1071 	 int error)
1072 {
1073 	struct em_q *q = arg;
1074 
1075 	if (error)
1076 		return;
1077 	KASSERT(nsegs <= EM_MAX_SCATTER,
1078 		("Too many DMA segments returned when mapping tx packet"));
1079 	q->nsegs = nsegs;
1080 	bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1081 }
1082 
1083 #define EM_FIFO_HDR              0x10
1084 #define EM_82547_PKT_THRESH      0x3e0
1085 #define EM_82547_TX_FIFO_SIZE    0x2800
1086 #define EM_82547_TX_FIFO_BEGIN   0xf00
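/*
 * Note (assumed reading of the constants above, and assuming EM_ROUNDUP()
 * rounds up to a multiple of its second argument): the 82547 has a
 * 0x2800-byte (10KB) on-chip Tx FIFO and each frame is charged a 16-byte
 * (EM_FIFO_HDR) header rounded up to a 16-byte boundary, so e.g. a
 * 1514-byte frame consumes EM_ROUNDUP(1514 + EM_FIFO_HDR, EM_FIFO_HDR) =
 * 1536 FIFO bytes in em_82547_update_fifo_head() below.
 */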
1087 /*********************************************************************
1088  *
1089  *  This routine maps the mbufs to tx descriptors.
1090  *
1091  *  return 0 on success, positive on failure
1092  **********************************************************************/
1093 static int
1094 em_encap(struct adapter *adapter, struct mbuf *m_head)
1095 {
1096 	uint32_t txd_upper;
1097 	uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1098 	int i, j, error;
1099 	uint64_t address;
1100 
1101 	/* For 82544 Workaround */
1102 	DESC_ARRAY desc_array;
1103 	uint32_t array_elements;
1104 	uint32_t counter;
1105 
1106 	struct ifvlan *ifv = NULL;
1107 	struct em_q q;
1108 	struct em_buffer *tx_buffer = NULL;
1109 	struct em_tx_desc *current_tx_desc = NULL;
1110 	struct ifnet *ifp = &adapter->interface_data.ac_if;
1111 
1112 	/*
1113 	 * Force a cleanup if the number of available TX descriptors
1114 	 * hits the threshold.
1115 	 */
1116 	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1117 		em_clean_transmit_interrupts(adapter);
1118 		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1119 			adapter->no_tx_desc_avail1++;
1120 			return(ENOBUFS);
1121 		}
1122 	}
1123 	/*
1124 	 * Map the packet for DMA.
1125 	 */
1126 	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1127 		adapter->no_tx_map_avail++;
1128 		return(ENOMEM);
1129 	}
1130 	error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1131 				     &q, BUS_DMA_NOWAIT);
1132 	if (error != 0) {
1133 		adapter->no_tx_dma_setup++;
1134 		bus_dmamap_destroy(adapter->txtag, q.map);
1135 		return(error);
1136 	}
1137 	KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1138 
1139 	if (q.nsegs > adapter->num_tx_desc_avail) {
1140 		adapter->no_tx_desc_avail2++;
1141 		bus_dmamap_unload(adapter->txtag, q.map);
1142 		bus_dmamap_destroy(adapter->txtag, q.map);
1143 		return(ENOBUFS);
1144 	}
1145 
1146 	if (ifp->if_hwassist > 0) {
1147 		em_transmit_checksum_setup(adapter,  m_head,
1148 					   &txd_upper, &txd_lower);
1149 	}
1150 	else
1151 		txd_upper = txd_lower = 0;
1152 
1153 	/* Find out if we are in vlan mode */
1154 	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1155 	    m_head->m_pkthdr.rcvif != NULL &&
1156 	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1157 		ifv = m_head->m_pkthdr.rcvif->if_softc;
1158 
1159 	i = adapter->next_avail_tx_desc;
1160 	if (adapter->pcix_82544) {
1161 		txd_saved = i;
1162 		txd_used = 0;
1163 	}
1164 	for (j = 0; j < q.nsegs; j++) {
1165 		/* If adapter is 82544 and on PCIX bus */
1166 		if(adapter->pcix_82544) {
1167 			array_elements = 0;
1168 			address = htole64(q.segs[j].ds_addr);
1169 			/*
1170 			 * Check the Address and Length combination and
1171 			 * split the data accordingly
1172 			 */
1173 			array_elements = em_fill_descriptors(address,
1174 							     htole32(q.segs[j].ds_len),
1175 							     &desc_array);
1176 			for (counter = 0; counter < array_elements; counter++) {
1177 				if (txd_used == adapter->num_tx_desc_avail) {
1178 					adapter->next_avail_tx_desc = txd_saved;
1179 					adapter->no_tx_desc_avail2++;
1180 					bus_dmamap_unload(adapter->txtag, q.map);
1181 					bus_dmamap_destroy(adapter->txtag, q.map);
1182 					return(ENOBUFS);
1183 				}
1184 				tx_buffer = &adapter->tx_buffer_area[i];
1185 				current_tx_desc = &adapter->tx_desc_base[i];
1186 				current_tx_desc->buffer_addr = htole64(
1187 				desc_array.descriptor[counter].address);
1188 				current_tx_desc->lower.data = htole32(
1189 				(adapter->txd_cmd | txd_lower |
1190 				(uint16_t)desc_array.descriptor[counter].length));
1191 				current_tx_desc->upper.data = htole32((txd_upper));
1192 				if (++i == adapter->num_tx_desc)
1193 					i = 0;
1194 
1195 				tx_buffer->m_head = NULL;
1196 				txd_used++;
1197 			}
1198 		} else {
1199 			tx_buffer = &adapter->tx_buffer_area[i];
1200 			current_tx_desc = &adapter->tx_desc_base[i];
1201 
1202 			current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1203 			current_tx_desc->lower.data = htole32(
1204 				adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1205 			current_tx_desc->upper.data = htole32(txd_upper);
1206 
1207 			if (++i == adapter->num_tx_desc)
1208 				i = 0;
1209 
1210 			tx_buffer->m_head = NULL;
1211 		}
1212 	}
1213 
1214 	adapter->next_avail_tx_desc = i;
1215 	if (adapter->pcix_82544)
1216 		adapter->num_tx_desc_avail -= txd_used;
1217 	else
1218 		adapter->num_tx_desc_avail -= q.nsegs;
1219 
1220 	if (ifv != NULL) {
1221 		/* Set the vlan id */
1222 		current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1223 
1224 		/* Tell hardware to add tag */
1225 		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1226 	}
1227 
1228 	tx_buffer->m_head = m_head;
1229 	tx_buffer->map = q.map;
1230 	bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1231 
1232 	/*
1233 	 * Last Descriptor of Packet needs End Of Packet (EOP)
1234 	 */
1235 	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1236 
1237 	/*
1238 	 * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
1239 	 * that this frame is available to transmit.
1240 	 */
1241 	if (adapter->hw.mac_type == em_82547 &&
1242 	    adapter->link_duplex == HALF_DUPLEX) {
1243 		em_82547_move_tail(adapter);
1244 	} else {
1245 		E1000_WRITE_REG(&adapter->hw, TDT, i);
1246 		if (adapter->hw.mac_type == em_82547) {
1247 			em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1248 		}
1249 	}
1250 
1251 	return(0);
1252 }
1253 
1254 /*********************************************************************
1255  *
1256  * 82547 workaround to avoid a controller hang in a half-duplex environment.
1257  * The workaround is to avoid queuing a large packet that would span the
1258  * internal Tx FIFO ring boundary. In that case the FIFO pointers must be
1259  * reset, which we do only while the FIFO is quiescent.
1260  *
1261  **********************************************************************/
1262 static void
1263 em_82547_move_tail(void *arg)
1264 {
1265 	int s;
1266 	struct adapter *adapter = arg;
1267 	uint16_t hw_tdt;
1268 	uint16_t sw_tdt;
1269 	struct em_tx_desc *tx_desc;
1270 	uint16_t length = 0;
1271 	boolean_t eop = 0;
1272 
1273 	s = splimp();
1274 	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1275 	sw_tdt = adapter->next_avail_tx_desc;
1276 
1277 	while (hw_tdt != sw_tdt) {
1278 		tx_desc = &adapter->tx_desc_base[hw_tdt];
1279 		length += tx_desc->lower.flags.length;
1280 		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1281 		if(++hw_tdt == adapter->num_tx_desc)
1282 			hw_tdt = 0;
1283 
1284 		if(eop) {
1285 			if (em_82547_fifo_workaround(adapter, length)) {
1286 				adapter->tx_fifo_wrk++;
1287 				callout_reset(&adapter->tx_fifo_timer, 1,
1288 					em_82547_move_tail, adapter);
1289 				break;
1290 			}
1291 			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1292 			em_82547_update_fifo_head(adapter, length);
1293 			length = 0;
1294 		}
1295 	}
1296 	splx(s);
1297 }
1298 
1299 static int
1300 em_82547_fifo_workaround(struct adapter *adapter, int len)
1301 {
1302 	int fifo_space, fifo_pkt_len;
1303 
1304 	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1305 
1306 	if (adapter->link_duplex == HALF_DUPLEX) {
1307 		fifo_space = EM_82547_TX_FIFO_SIZE - adapter->tx_fifo_head;
1308 
1309 		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1310 			if (em_82547_tx_fifo_reset(adapter))
1311 				return(0);
1312 			else
1313 				return(1);
1314 		}
1315 	}
1316 
1317 	return(0);
1318 }
1319 
1320 static void
1321 em_82547_update_fifo_head(struct adapter *adapter, int len)
1322 {
1323 	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1324 
1325 	/* tx_fifo_head is always 16 byte aligned */
1326 	adapter->tx_fifo_head += fifo_pkt_len;
1327 	if (adapter->tx_fifo_head >= EM_82547_TX_FIFO_SIZE)
1328 		adapter->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
1329 }
1330 
1331 static int
1332 em_82547_tx_fifo_reset(struct adapter *adapter)
1333 {
1334 	uint32_t tctl;
1335 
1336 	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1337 	      E1000_READ_REG(&adapter->hw, TDH)) &&
1338 	     (E1000_READ_REG(&adapter->hw, TDFT) ==
1339 	      E1000_READ_REG(&adapter->hw, TDFH)) &&
1340 	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
1341 	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
1342 	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1343 
1344 		/* Disable TX unit */
1345 		tctl = E1000_READ_REG(&adapter->hw, TCTL);
1346 		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1347 
1348 		/* Reset FIFO pointers */
1349 		E1000_WRITE_REG(&adapter->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
1350 		E1000_WRITE_REG(&adapter->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
1351 		E1000_WRITE_REG(&adapter->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
1352 		E1000_WRITE_REG(&adapter->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);
1353 
1354 		/* Re-enable TX unit */
1355 		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1356 		E1000_WRITE_FLUSH(&adapter->hw);
1357 
1358 		adapter->tx_fifo_head = 0;
1359 		adapter->tx_fifo_reset++;
1360 
1361 		return(TRUE);
1362 	}
1363 	else {
1364 		return(FALSE);
1365 	}
1366 }
1367 
1368 static void
1369 em_set_promisc(struct adapter *adapter)
1370 {
1371 	uint32_t reg_rctl;
1372 	struct ifnet *ifp = &adapter->interface_data.ac_if;
1373 
1374 	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1375 
1376 	if (ifp->if_flags & IFF_PROMISC) {
1377 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1378 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1379 	} else if (ifp->if_flags & IFF_ALLMULTI) {
1380 		reg_rctl |= E1000_RCTL_MPE;
1381 		reg_rctl &= ~E1000_RCTL_UPE;
1382 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1383 	}
1384 }
1385 
1386 static void
1387 em_disable_promisc(struct adapter *adapter)
1388 {
1389 	uint32_t reg_rctl;
1390 
1391 	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1392 
1393 	reg_rctl &=  (~E1000_RCTL_UPE);
1394 	reg_rctl &=  (~E1000_RCTL_MPE);
1395 	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1396 }
1397 
1398 /*********************************************************************
1399  *  Multicast Update
1400  *
1401  *  This routine is called whenever the multicast address list is updated.
1402  *
1403  **********************************************************************/
1404 
1405 static void
1406 em_set_multi(struct adapter *adapter)
1407 {
1408 	uint32_t reg_rctl = 0;
1409 	uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1410 	struct ifmultiaddr *ifma;
1411 	int mcnt = 0;
1412 	struct ifnet *ifp = &adapter->interface_data.ac_if;
1413 
1414 	IOCTL_DEBUGOUT("em_set_multi: begin");
1415 
1416 	if (adapter->hw.mac_type == em_82542_rev2_0) {
1417 		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1418 		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1419 			em_pci_clear_mwi(&adapter->hw);
1420 		reg_rctl |= E1000_RCTL_RST;
1421 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1422 		msec_delay(5);
1423 	}
1424 
1425 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1426 		if (ifma->ifma_addr->sa_family != AF_LINK)
1427 			continue;
1428 
1429 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1430 			break;
1431 
1432 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1433 		      &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1434 		mcnt++;
1435 	}
1436 
1437 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1438 		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1439 		reg_rctl |= E1000_RCTL_MPE;
1440 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1441 	} else
1442 		em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1443 
1444 	if (adapter->hw.mac_type == em_82542_rev2_0) {
1445 		reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1446 		reg_rctl &= ~E1000_RCTL_RST;
1447 		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1448 		msec_delay(5);
1449 		if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1450                         em_pci_set_mwi(&adapter->hw);
1451 	}
1452 }
1453 
1454 /*********************************************************************
1455  *  Timer routine
1456  *
1457  *  This routine checks for link status and updates statistics.
1458  *
1459  **********************************************************************/
1460 
1461 static void
1462 em_local_timer(void *arg)
1463 {
1464 	int s;
1465 	struct ifnet *ifp;
1466 	struct adapter *adapter = arg;
1467 	ifp = &adapter->interface_data.ac_if;
1468 
1469 	s = splimp();
1470 
1471 	em_check_for_link(&adapter->hw);
1472 	em_print_link_status(adapter);
1473 	em_update_stats_counters(adapter);
1474 	if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
1475 		em_print_hw_stats(adapter);
1476 	em_smartspeed(adapter);
1477 
1478 	callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
1479 
1480 	splx(s);
1481 }
1482 
1483 static void
1484 em_print_link_status(struct adapter *adapter)
1485 {
1486 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1487 		if (adapter->link_active == 0) {
1488 			em_get_speed_and_duplex(&adapter->hw,
1489 						&adapter->link_speed,
1490 						&adapter->link_duplex);
1491 			device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1492 			       adapter->link_speed,
1493 			       ((adapter->link_duplex == FULL_DUPLEX) ?
1494 				"Full Duplex" : "Half Duplex"));
1495 			adapter->link_active = 1;
1496 			adapter->smartspeed = 0;
1497 		}
1498 	} else {
1499 		if (adapter->link_active == 1) {
1500 			adapter->link_speed = 0;
1501 			adapter->link_duplex = 0;
1502 			device_printf(adapter->dev, "Link is Down\n");
1503 			adapter->link_active = 0;
1504 		}
1505 	}
1506 }
1507 
1508 /*********************************************************************
1509  *
1510  *  This routine disables all traffic on the adapter by issuing a
1511  *  global reset on the MAC and deallocates TX/RX buffers.
1512  *
1513  **********************************************************************/
1514 
1515 static void
1516 em_stop(void *arg)
1517 {
1518 	struct ifnet   *ifp;
1519 	struct adapter * adapter = arg;
1520 	ifp = &adapter->interface_data.ac_if;
1521 
1522 	INIT_DEBUGOUT("em_stop: begin");
1523 	em_disable_intr(adapter);
1524 	em_reset_hw(&adapter->hw);
1525 	callout_stop(&adapter->timer);
1526 	callout_stop(&adapter->tx_fifo_timer);
1527 	em_free_transmit_structures(adapter);
1528 	em_free_receive_structures(adapter);
1529 
1530 	/* Tell the stack that the interface is no longer active */
1531 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1532 	ifp->if_timer = 0;
1533 }
1534 
1535 /*********************************************************************
1536  *
1537  *  Determine hardware revision.
1538  *
1539  **********************************************************************/
1540 static void
1541 em_identify_hardware(struct adapter * adapter)
1542 {
1543 	device_t dev = adapter->dev;
1544 
1545 	/* Make sure bus mastering and memory access are enabled in PCI config space */
1546 	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1547 	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1548 	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1549 		device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
1550 		adapter->hw.pci_cmd_word |=
1551 		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1552 		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1553 	}
1554 
1555 	/* Save off the information about this board */
1556 	adapter->hw.vendor_id = pci_get_vendor(dev);
1557 	adapter->hw.device_id = pci_get_device(dev);
1558 	adapter->hw.revision_id = pci_get_revid(dev);
1559 	adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1560 	adapter->hw.subsystem_id = pci_get_subdevice(dev);
1561 
1562 	/* Identify the MAC */
1563 	if (em_set_mac_type(&adapter->hw))
1564 		device_printf(dev, "Unknown MAC Type\n");
1565 
1566 	if (adapter->hw.mac_type == em_82541 ||
1567 	    adapter->hw.mac_type == em_82541_rev_2 ||
1568 	    adapter->hw.mac_type == em_82547 ||
1569 	    adapter->hw.mac_type == em_82547_rev_2)
1570 		adapter->hw.phy_init_script = TRUE;
1571 }
1572 
1573 /*********************************************************************
1574  *
1575  *  Initialize the hardware to a configuration as specified by the
1576  *  adapter structure. The controller is reset, the EEPROM is
1577  *  verified, the MAC address is set, then the shared initialization
1578  *  routines are called.
1579  *
1580  **********************************************************************/
1581 static int
1582 em_hardware_init(struct adapter *adapter)
1583 {
1584 	INIT_DEBUGOUT("em_hardware_init: begin");
1585 	/* Issue a global reset */
1586 	em_reset_hw(&adapter->hw);
1587 
1588 	/* When hardware is reset, fifo_head is also reset */
1589 	adapter->tx_fifo_head = 0;
1590 
1591 	/* Make sure we have a good EEPROM before we read from it */
1592 	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1593 		device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n");
1594 		return(EIO);
1595 	}
1596 
1597 	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1598 		device_printf(adapter->dev, "EEPROM read error while reading part number\n");
1599 		return(EIO);
1600 	}
1601 
1602 	if (em_init_hw(&adapter->hw) < 0) {
1603 		device_printf(adapter->dev, "Hardware Initialization Failed");
1604 		return(EIO);
1605 	}
1606 
1607 	em_check_for_link(&adapter->hw);
1608 	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1609 		adapter->link_active = 1;
1610 	else
1611 		adapter->link_active = 0;
1612 
1613 	if (adapter->link_active) {
1614 		em_get_speed_and_duplex(&adapter->hw,
1615 					&adapter->link_speed,
1616 					&adapter->link_duplex);
1617 	} else {
1618 		adapter->link_speed = 0;
1619 		adapter->link_duplex = 0;
1620 	}
1621 
1622 	return(0);
1623 }
1624 
1625 /*********************************************************************
1626  *
1627  *  Setup networking device structure and register an interface.
1628  *
1629  **********************************************************************/
1630 static void
1631 em_setup_interface(device_t dev, struct adapter *adapter)
1632 {
1633 	struct ifnet   *ifp;
1634 	INIT_DEBUGOUT("em_setup_interface: begin");
1635 
1636 	ifp = &adapter->interface_data.ac_if;
1637 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1638 	ifp->if_mtu = ETHERMTU;
1639 	ifp->if_baudrate = 1000000000;
1640 	ifp->if_init =  em_init;
1641 	ifp->if_softc = adapter;
1642 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1643 	ifp->if_ioctl = em_ioctl;
1644 	ifp->if_start = em_start;
1645 	ifp->if_watchdog = em_watchdog;
1646 	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1647 
1648 	ether_ifattach(ifp, adapter->hw.mac_addr);
1649 
1650 	if (adapter->hw.mac_type >= em_82543) {
1651 		ifp->if_capabilities = IFCAP_HWCSUM;
1652 		ifp->if_capenable = ifp->if_capabilities;
1653 	}
1654 
1655 	/*
1656 	 * Tell the upper layer(s) we support long frames.
1657 	 */
1658 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1659 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1660 
1661 	/*
1662 	 * Specify the media types supported by this adapter and register
1663 	 * callbacks to update media and link information
1664 	 */
1665 	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1666 		     em_media_status);
1667 	if (adapter->hw.media_type == em_media_type_fiber) {
1668 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1669 			    0, NULL);
1670 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1671 			    0, NULL);
1672 	} else {
1673 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1674 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1675 			    0, NULL);
1676 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1677 			    0, NULL);
1678 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1679 			    0, NULL);
1680 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX | IFM_FDX,
1681 			    0, NULL);
1682 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX, 0, NULL);
1683 	}
1684 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1685 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1686 }
1687 
1688 /*********************************************************************
1689  *
1690  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1691  *
1692  **********************************************************************/
1693 static void
1694 em_smartspeed(struct adapter *adapter)
1695 {
1696 	uint16_t phy_tmp;
1697 
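	/*
	 * The workaround only applies while the link is down on an IGP
	 * PHY that is autonegotiating with 1000BASE-T full duplex
	 * advertised; otherwise there is nothing to do.
	 */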
1698 	if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1699 	    !adapter->hw.autoneg ||
1700 	    !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1701 		return;
1702 
1703 	if (adapter->smartspeed == 0) {
1704 		/*
1705 		 * If the Master/Slave config fault is asserted twice in a
1706 		 * row, assume back-to-back faults and clear the forced setting.
1707 		 */
1708 		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1709 		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1710 			return;
1711 		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1712 		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1713 			em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
1714 					&phy_tmp);
1715 			if (phy_tmp & CR_1000T_MS_ENABLE) {
1716 				phy_tmp &= ~CR_1000T_MS_ENABLE;
1717 				em_write_phy_reg(&adapter->hw,
1718 						 PHY_1000T_CTRL, phy_tmp);
1719 				adapter->smartspeed++;
1720 				if (adapter->hw.autoneg &&
1721 				    !em_phy_setup_autoneg(&adapter->hw) &&
1722 				    !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1723 						     &phy_tmp)) {
1724 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
1725 						    MII_CR_RESTART_AUTO_NEG);
1726 					em_write_phy_reg(&adapter->hw,
1727 							 PHY_CTRL, phy_tmp);
1728 				}
1729 			}
1730 		}
1731 		return;
1732 	} else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1733 		/* If there is still no link, the cable may only have 2 or 3 pairs */
1734 		em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1735 		phy_tmp |= CR_1000T_MS_ENABLE;
1736 		em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1737 		if (adapter->hw.autoneg &&
1738 		    !em_phy_setup_autoneg(&adapter->hw) &&
1739 		    !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1740 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
1741 				    MII_CR_RESTART_AUTO_NEG);
1742 			em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1743 		}
1744 	}
1745 	/* Restart process after EM_SMARTSPEED_MAX iterations */
1746 	if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1747 		adapter->smartspeed = 0;
1748 }
1749 
1750 /*
1751  * Manage DMA'able memory.
1752  */
1753 static void
1754 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1755 {
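	/*
	 * bus_dmamap_load() callback: on success, record the physical
	 * address of the single segment in the caller-supplied bus_addr_t.
	 */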
1756 	if (error)
1757 		return;
1758 	*(bus_addr_t*) arg = segs->ds_addr;
1759 }
1760 
1761 static int
1762 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1763 	      struct em_dma_alloc *dma, int mapflags)
1764 {
1765 	int r;
1766 	device_t dev = adapter->dev;
1767 
1768 	r = bus_dma_tag_create(NULL,                    /* parent */
1769 			       PAGE_SIZE, 0,            /* alignment, bounds */
1770 			       BUS_SPACE_MAXADDR,       /* lowaddr */
1771 			       BUS_SPACE_MAXADDR,       /* highaddr */
1772 			       NULL, NULL,              /* filter, filterarg */
1773 			       size,                    /* maxsize */
1774 			       1,                       /* nsegments */
1775 			       size,                    /* maxsegsize */
1776 			       BUS_DMA_ALLOCNOW,        /* flags */
1777 			       &dma->dma_tag);
1778 	if (r != 0) {
1779 		device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1780 			      "error %u\n", r);
1781 		goto fail_0;
1782 	}
1783 
1784 	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1785 			     BUS_DMA_NOWAIT, &dma->dma_map);
1786 	if (r != 0) {
1787 		device_printf(dev, "em_dma_malloc: bus_dmamem_alloc failed; "
1788 			      "size %llu, error %d\n", (unsigned long long)size, r);
1789 		goto fail_2;
1790 	}
1791 
1792 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1793 			    size,
1794 			    em_dmamap_cb,
1795 			    &dma->dma_paddr,
1796 			    mapflags | BUS_DMA_NOWAIT);
1797 	if (r != 0) {
1798 		device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1799 			      "error %u\n", r);
1800 		goto fail_3;
1801 	}
1802 
1803 	dma->dma_size = size;
1804 	return(0);
1805 
1806 fail_3:
1807 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1808 fail_2:
1809 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1810 	bus_dma_tag_destroy(dma->dma_tag);
1811 fail_0:
1812 	dma->dma_map = NULL;
1813 	dma->dma_tag = NULL;
1814 	return(r);
1815 }
1816 
1817 static void
1818 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1819 {
1820 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1821 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1822 	bus_dma_tag_destroy(dma->dma_tag);
1823 }
1824 
1825 /*********************************************************************
1826  *
1827  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1828  *  the information needed to transmit a packet on the wire.
1829  *
1830  **********************************************************************/
1831 static int
1832 em_allocate_transmit_structures(struct adapter * adapter)
1833 {
1834 	adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1835 	    adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1836 	if (adapter->tx_buffer_area == NULL) {
1837 		device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1838 		return(ENOMEM);
1839 	}
1840 
1841 	return(0);
1842 }
1843 
1844 /*********************************************************************
1845  *
1846  *  Allocate and initialize transmit structures.
1847  *
1848  **********************************************************************/
1849 static int
1850 em_setup_transmit_structures(struct adapter * adapter)
1851 {
1852 	/*
1853 	 * Setup DMA descriptor areas.
1854 	 */
1855 	if (bus_dma_tag_create(NULL,                    /* parent */
1856 			       1, 0,			/* alignment, bounds */
1857 			       BUS_SPACE_MAXADDR,       /* lowaddr */
1858 			       BUS_SPACE_MAXADDR,       /* highaddr */
1859 			       NULL, NULL,              /* filter, filterarg */
1860 			       MCLBYTES * 8,            /* maxsize */
1861 			       EM_MAX_SCATTER,          /* nsegments */
1862 			       MCLBYTES * 8,            /* maxsegsize */
1863 			       BUS_DMA_ALLOCNOW,        /* flags */
1864 			       &adapter->txtag)) {
1865 		device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1866 		return(ENOMEM);
1867 	}
1868 
1869 	if (em_allocate_transmit_structures(adapter))
1870 		return(ENOMEM);
1871 
1872 	bzero((void *) adapter->tx_desc_base,
1873 	      (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
1874 
1875 	adapter->next_avail_tx_desc = 0;
1876 	adapter->oldest_used_tx_desc = 0;
1877 
1878 	/* Set number of descriptors available */
1879 	adapter->num_tx_desc_avail = adapter->num_tx_desc;
1880 
1881 	/* Set checksum context */
1882 	adapter->active_checksum_context = OFFLOAD_NONE;
1883 
1884 	return(0);
1885 }
1886 
1887 /*********************************************************************
1888  *
1889  *  Enable transmit unit.
1890  *
1891  **********************************************************************/
1892 static void
1893 em_initialize_transmit_unit(struct adapter * adapter)
1894 {
1895 	uint32_t reg_tctl;
1896 	uint32_t reg_tipg = 0;
1897 	uint64_t bus_addr;
1898 
1899 	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
1900 
1901 	/* Setup the Base and Length of the Tx Descriptor Ring */
1902 	bus_addr = adapter->txdma.dma_paddr;
1903 	E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
1904 	E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
1905 	E1000_WRITE_REG(&adapter->hw, TDLEN,
1906 			adapter->num_tx_desc * sizeof(struct em_tx_desc));
1907 
1908 	/* Setup the HW Tx Head and Tail descriptor pointers */
1909 	E1000_WRITE_REG(&adapter->hw, TDH, 0);
1910 	E1000_WRITE_REG(&adapter->hw, TDT, 0);
1911 
1912 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
1913 		     E1000_READ_REG(&adapter->hw, TDBAL),
1914 		     E1000_READ_REG(&adapter->hw, TDLEN));
1915 
1916 	/* Set the default values for the Tx Inter Packet Gap timer */
1917 	switch (adapter->hw.mac_type) {
1918 	case em_82542_rev2_0:
1919 	case em_82542_rev2_1:
1920 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
1921 		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1922 		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1923 		break;
1924 	default:
1925 		if (adapter->hw.media_type == em_media_type_fiber)
1926 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1927 		else
1928 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1929 		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1930 		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1931 	}
1932 
1933 	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
1934 	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
1935 	if (adapter->hw.mac_type >= em_82540)
1936 		E1000_WRITE_REG(&adapter->hw, TADV,
1937 				adapter->tx_abs_int_delay.value);
1938 
1939 	/* Program the Transmit Control Register */
1940 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
1941 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1942 	if (adapter->link_duplex == 1)
1943 		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1944 	else
1945 		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1946 	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1947 
1948 	/* Setup Transmit Descriptor Settings for this adapter */
1949 	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
1950 
1951 	if (adapter->tx_int_delay.value > 0)
1952 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1953 }
1954 
1955 /*********************************************************************
1956  *
1957  *  Free all transmit related data structures.
1958  *
1959  **********************************************************************/
1960 static void
1961 em_free_transmit_structures(struct adapter * adapter)
1962 {
1963 	struct em_buffer *tx_buffer;
1964 	int i;
1965 
1966 	INIT_DEBUGOUT("free_transmit_structures: begin");
1967 
1968 	if (adapter->tx_buffer_area != NULL) {
1969 		tx_buffer = adapter->tx_buffer_area;
1970 		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1971 			if (tx_buffer->m_head != NULL) {
1972 				bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1973 				bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1974 				m_freem(tx_buffer->m_head);
1975 			}
1976 			tx_buffer->m_head = NULL;
1977 		}
1978 	}
1979 	if (adapter->tx_buffer_area != NULL) {
1980 		free(adapter->tx_buffer_area, M_DEVBUF);
1981 		adapter->tx_buffer_area = NULL;
1982 	}
1983 	if (adapter->txtag != NULL) {
1984 		bus_dma_tag_destroy(adapter->txtag);
1985 		adapter->txtag = NULL;
1986 	}
1987 }
1988 
1989 /*********************************************************************
1990  *
1991  *  The offload context needs to be set when we transfer the first
1992  *  packet of a particular protocol (TCP/UDP). We change the
1993  *  context only if the protocol type changes.
1994  *
1995  **********************************************************************/
1996 static void
1997 em_transmit_checksum_setup(struct adapter * adapter,
1998 			   struct mbuf *mp,
1999 			   uint32_t *txd_upper,
2000 			   uint32_t *txd_lower)
2001 {
2002 	struct em_context_desc *TXD;
2003 	struct em_buffer *tx_buffer;
2004 	int curr_txd;
2005 
2006 	if (mp->m_pkthdr.csum_flags) {
2007 		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2008 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2009 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2010 			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2011 				return;
2012 			else
2013 				adapter->active_checksum_context = OFFLOAD_TCP_IP;
2014 		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2015 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2016 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2017 			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2018 				return;
2019 			else
2020 				adapter->active_checksum_context = OFFLOAD_UDP_IP;
2021 		} else {
2022 			*txd_upper = 0;
2023 			*txd_lower = 0;
2024 			return;
2025 		}
2026 	} else {
2027 		*txd_upper = 0;
2028 		*txd_lower = 0;
2029 		return;
2030 	}
2031 
2032 	/* If we reach this point, the checksum offload context
2033 	 * needs to be reset.
2034 	 */
2035 	curr_txd = adapter->next_avail_tx_desc;
2036 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2037 	TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2038 
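	/*
	 * IP portion of the context: the IP header starts right after the
	 * Ethernet header; point the hardware at the ip_sum field and note
	 * where the header ends.
	 */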
2039 	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2040 	TXD->lower_setup.ip_fields.ipcso =
2041 	    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2042 	TXD->lower_setup.ip_fields.ipcse =
2043 	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2044 
2045 	TXD->upper_setup.tcp_fields.tucss =
2046 	    ETHER_HDR_LEN + sizeof(struct ip);
2047 	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2048 
2049 	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2050 		TXD->upper_setup.tcp_fields.tucso =
2051 		    ETHER_HDR_LEN + sizeof(struct ip) +
2052 		    offsetof(struct tcphdr, th_sum);
2053 	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2054 		TXD->upper_setup.tcp_fields.tucso =
2055 			ETHER_HDR_LEN + sizeof(struct ip) +
2056 			offsetof(struct udphdr, uh_sum);
2057 	}
2058 
2059 	TXD->tcp_seg_setup.data = htole32(0);
2060 	TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2061 
2062 	tx_buffer->m_head = NULL;
2063 
2064 	if (++curr_txd == adapter->num_tx_desc)
2065 		curr_txd = 0;
2066 
2067 	adapter->num_tx_desc_avail--;
2068 	adapter->next_avail_tx_desc = curr_txd;
2069 }
2070 
2071 /**********************************************************************
2072  *
2073  *  Examine each tx_buffer in the used queue. If the hardware is done
2074  *  processing the packet then free associated resources. The
2075  *  tx_buffer is put back on the free queue.
2076  *
2077  **********************************************************************/
2078 
2079 static void
2080 em_clean_transmit_interrupts(struct adapter *adapter)
2081 {
2082 	int s;
2083 	int i, num_avail;
2084 	struct em_buffer *tx_buffer;
2085 	struct em_tx_desc *tx_desc;
2086 	struct ifnet *ifp = &adapter->interface_data.ac_if;
2087 
2088 	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2089 		return;
2090 
2091 	s = splimp();
2092 #ifdef DBG_STATS
2093 	adapter->clean_tx_interrupts++;
2094 #endif
2095 	num_avail = adapter->num_tx_desc_avail;
2096 	i = adapter->oldest_used_tx_desc;
2097 
2098 	tx_buffer = &adapter->tx_buffer_area[i];
2099 	tx_desc = &adapter->tx_desc_base[i];
2100 
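	/* Reclaim descriptors the hardware has marked done (DD set). */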
2101 	while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2102 		tx_desc->upper.data = 0;
2103 		num_avail++;
2104 
2105 		if (tx_buffer->m_head) {
2106 			ifp->if_opackets++;
2107 			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2108 					BUS_DMASYNC_POSTWRITE);
2109 			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2110 			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2111 
2112 			m_freem(tx_buffer->m_head);
2113 			tx_buffer->m_head = NULL;
2114 		}
2115 
2116 		if (++i == adapter->num_tx_desc)
2117 			i = 0;
2118 
2119 		tx_buffer = &adapter->tx_buffer_area[i];
2120 		tx_desc = &adapter->tx_desc_base[i];
2121 	}
2122 
2123 	adapter->oldest_used_tx_desc = i;
2124 
2125 	/*
2126 	 * If we have enough room, clear IFF_OACTIVE to tell the stack
2127 	 * that it is OK to send packets.
2128 	 * If there are no pending descriptors, clear the timeout. Otherwise,
2129 	 * if some descriptors have been freed, restart the timeout.
2130 	 */
2131 	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2132 		ifp->if_flags &= ~IFF_OACTIVE;
2133 		if (num_avail == adapter->num_tx_desc)
2134 			ifp->if_timer = 0;
2135 		else if (num_avail == adapter->num_tx_desc_avail)
2136 			ifp->if_timer = EM_TX_TIMEOUT;
2137 	}
2138 	adapter->num_tx_desc_avail = num_avail;
2139 	splx(s);
2140 }
2141 
2142 /*********************************************************************
2143  *
2144  *  Get a buffer from system mbuf buffer pool.
2145  *
2146  **********************************************************************/
2147 static int
2148 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2149 {
2150 	struct mbuf *mp = nmp;
2151 	struct em_buffer *rx_buffer;
2152 	struct ifnet *ifp;
2153 	bus_addr_t paddr;
2154 	int error;
2155 
2156 	ifp = &adapter->interface_data.ac_if;
2157 
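	/*
	 * Reuse the caller-supplied mbuf if one was passed in; otherwise
	 * allocate a fresh cluster mbuf.
	 */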
2158 	if (mp == NULL) {
2159 		mp = m_getcl(how, MT_DATA, M_PKTHDR);
2160 		if (mp == NULL) {
2161 			adapter->mbuf_cluster_failed++;
2162 			return(ENOBUFS);
2163 		}
2164 		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2165 	} else {
2166 		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2167 		mp->m_data = mp->m_ext.ext_buf;
2168 		mp->m_next = NULL;
2169 	}
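	/*
	 * For standard-sized frames, shift the payload by ETHER_ALIGN (2)
	 * so the IP header behind the 14-byte Ethernet header ends up
	 * 32-bit aligned.
	 */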
2170 	if (ifp->if_mtu <= ETHERMTU)
2171 		m_adj(mp, ETHER_ALIGN);
2172 
2173 	rx_buffer = &adapter->rx_buffer_area[i];
2174 
2175 	/*
2176 	 * Using memory from the mbuf cluster pool, invoke the
2177 	 * bus_dma machinery to arrange the memory mapping.
2178 	 */
2179 	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2180 				mtod(mp, void *), mp->m_len,
2181 				em_dmamap_cb, &paddr, 0);
2182 	if (error) {
2183 		m_free(mp);
2184 		return(error);
2185 	}
2186 	rx_buffer->m_head = mp;
2187 	adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2188 	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2189 
2190 	return(0);
2191 }
2192 
2193 /*********************************************************************
2194  *
2195  *  Allocate memory for rx_buffer structures. Since we use one
2196  *  rx_buffer per received packet, the maximum number of rx_buffer's
2197  *  that we'll need is equal to the number of receive descriptors
2198  *  that we've allocated.
2199  *
2200  **********************************************************************/
2201 static int
2202 em_allocate_receive_structures(struct adapter *adapter)
2203 {
2204 	int i, error, size;
2205 	struct em_buffer *rx_buffer;
2206 
2207 	size = adapter->num_rx_desc * sizeof(struct em_buffer);
2208 	adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2209 
2210 	error = bus_dma_tag_create(NULL,		/* parent */
2211 				   1, 0,		/* alignment, bounds */
2212 				   BUS_SPACE_MAXADDR,	/* lowaddr */
2213 				   BUS_SPACE_MAXADDR,	/* highaddr */
2214 				   NULL, NULL,		/* filter, filterarg */
2215 				   MCLBYTES,		/* maxsize */
2216 				   1,			/* nsegments */
2217 				   MCLBYTES,		/* maxsegsize */
2218 				   BUS_DMA_ALLOCNOW,	/* flags */
2219 				   &adapter->rxtag);
2220 	if (error != 0) {
2221 		device_printf(adapter->dev, "em_allocate_receive_structures: "
2222 			      "bus_dma_tag_create failed; error %u\n", error);
2223 		goto fail_0;
2224 	}
2225 
2226 	rx_buffer = adapter->rx_buffer_area;
2227 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2228 		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2229 					  &rx_buffer->map);
2230 		if (error != 0) {
2231 			device_printf(adapter->dev,
2232 				      "em_allocate_receive_structures: "
2233 				      "bus_dmamap_create failed; error %u\n",
2234 				      error);
2235 			goto fail_1;
2236 		}
2237 	}
2238 
2239 	for (i = 0; i < adapter->num_rx_desc; i++) {
2240 		error = em_get_buf(i, adapter, NULL, MB_WAIT);
2241 		if (error != 0) {
2242 			adapter->rx_buffer_area[i].m_head = NULL;
2243 			adapter->rx_desc_base[i].buffer_addr = 0;
2244 			return(error);
2245 		}
2246 	}
2247 
2248 	return(0);
2249 
2250 fail_1:
2251 	bus_dma_tag_destroy(adapter->rxtag);
2252 fail_0:
2253 	adapter->rxtag = NULL;
2254 	free(adapter->rx_buffer_area, M_DEVBUF);
2255 	adapter->rx_buffer_area = NULL;
2256 	return(error);
2257 }
2258 
2259 /*********************************************************************
2260  *
2261  *  Allocate and initialize receive structures.
2262  *
2263  **********************************************************************/
2264 static int
2265 em_setup_receive_structures(struct adapter *adapter)
2266 {
2267 	bzero((void *) adapter->rx_desc_base,
2268 	      (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2269 
2270 	if (em_allocate_receive_structures(adapter))
2271 		return(ENOMEM);
2272 
2273 	/* Setup our descriptor pointers */
2274 	adapter->next_rx_desc_to_check = 0;
2275 	return(0);
2276 }
2277 
2278 /*********************************************************************
2279  *
2280  *  Enable receive unit.
2281  *
2282  **********************************************************************/
2283 static void
2284 em_initialize_receive_unit(struct adapter *adapter)
2285 {
2286 	uint32_t reg_rctl;
2287 	uint32_t reg_rxcsum;
2288 	struct ifnet *ifp;
2289 	uint64_t bus_addr;
2290 
2291 	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2292 
2293 	ifp = &adapter->interface_data.ac_if;
2294 
2295 	/* Make sure receives are disabled while setting up the descriptor ring */
2296 	E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2297 
2298 	/* Set the Receive Delay Timer Register */
2299 	E1000_WRITE_REG(&adapter->hw, RDTR,
2300 			adapter->rx_int_delay.value | E1000_RDT_FPDB);
2301 
2302 	if (adapter->hw.mac_type >= em_82540) {
2303 		E1000_WRITE_REG(&adapter->hw, RADV,
2304 				adapter->rx_abs_int_delay.value);
2305 
2306 		/* Set the interrupt throttling rate in 256ns increments */
2307 		if (em_int_throttle_ceil) {
2308 			E1000_WRITE_REG(&adapter->hw, ITR,
2309 				1000000000 / 256 / em_int_throttle_ceil);
2310 		} else {
2311 			E1000_WRITE_REG(&adapter->hw, ITR, 0);
2312 		}
2313 	}
2314 
2315 	/* Setup the Base and Length of the Rx Descriptor Ring */
2316 	bus_addr = adapter->rxdma.dma_paddr;
2317 	E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2318 	E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2319 	E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2320 			sizeof(struct em_rx_desc));
2321 
2322 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2323 	E1000_WRITE_REG(&adapter->hw, RDH, 0);
2324 	E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2325 
2326 	/* Setup the Receive Control Register */
2327 	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2328 		   E1000_RCTL_RDMTS_HALF |
2329 		   (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2330 
2331 	if (adapter->hw.tbi_compatibility_on == TRUE)
2332 		reg_rctl |= E1000_RCTL_SBP;
2333 
2334 	switch (adapter->rx_buffer_len) {
2335 	default:
2336 	case EM_RXBUFFER_2048:
2337 		reg_rctl |= E1000_RCTL_SZ_2048;
2338 		break;
2339 	case EM_RXBUFFER_4096:
2340 		reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2341 		break;
2342 	case EM_RXBUFFER_8192:
2343 		reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2344 		break;
2345 	case EM_RXBUFFER_16384:
2346 		reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2347 		break;
2348 	}
2349 
2350 	if (ifp->if_mtu > ETHERMTU)
2351 		reg_rctl |= E1000_RCTL_LPE;
2352 
2353 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2354 	if ((adapter->hw.mac_type >= em_82543) &&
2355 	    (ifp->if_capenable & IFCAP_RXCSUM)) {
2356 		reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2357 		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2358 		E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2359 	}
2360 
2361 	/* Enable Receives */
2362 	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2363 }
2364 
2365 /*********************************************************************
2366  *
2367  *  Free receive related data structures.
2368  *
2369  **********************************************************************/
2370 static void
2371 em_free_receive_structures(struct adapter *adapter)
2372 {
2373 	struct em_buffer *rx_buffer;
2374 	int i;
2375 
2376 	INIT_DEBUGOUT("free_receive_structures: begin");
2377 
2378 	if (adapter->rx_buffer_area != NULL) {
2379 		rx_buffer = adapter->rx_buffer_area;
2380 		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2381 			if (rx_buffer->map != NULL) {
2382 				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2383 				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2384 			}
2385 			if (rx_buffer->m_head != NULL)
2386 				m_freem(rx_buffer->m_head);
2387 			rx_buffer->m_head = NULL;
2388 		}
2389 	}
2390 	if (adapter->rx_buffer_area != NULL) {
2391 		free(adapter->rx_buffer_area, M_DEVBUF);
2392 		adapter->rx_buffer_area = NULL;
2393 	}
2394 	if (adapter->rxtag != NULL) {
2395 		bus_dma_tag_destroy(adapter->rxtag);
2396 		adapter->rxtag = NULL;
2397 	}
2398 }
2399 
2400 /*********************************************************************
2401  *
2402  *  This routine executes in interrupt context. It replenishes
2403  *  the mbufs in the descriptor ring and passes data that has been
2404  *  DMA'ed into host memory up to the upper layer.
2405  *
2406  *  We loop at most count times if count is > 0, or until done if
2407  *  count < 0.
2408  *
2409  *********************************************************************/
2410 static void
2411 em_process_receive_interrupts(struct adapter *adapter, int count)
2412 {
2413 	struct ifnet *ifp;
2414 	struct mbuf *mp;
2415 	uint8_t accept_frame = 0;
2416 	uint8_t eop = 0;
2417 	uint16_t len, desc_len, prev_len_adj;
2418 	int i;
2419 
2420 	/* Pointer to the receive descriptor being examined. */
2421 	struct em_rx_desc *current_desc;
2422 
2423 	ifp = &adapter->interface_data.ac_if;
2424 	i = adapter->next_rx_desc_to_check;
2425 	current_desc = &adapter->rx_desc_base[i];
2426 
2427 	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2428 #ifdef DBG_STATS
2429 		adapter->no_pkts_avail++;
2430 #endif
2431 		return;
2432 	}
2433 	while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2434 		mp = adapter->rx_buffer_area[i].m_head;
2435 		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2436 				BUS_DMASYNC_POSTREAD);
2437 
2438 		accept_frame = 1;
2439 		prev_len_adj = 0;
2440 		desc_len = le16toh(current_desc->length);
2441 		if (current_desc->status & E1000_RXD_STAT_EOP) {
2442 			count--;
2443 			eop = 1;
2444 			if (desc_len < ETHER_CRC_LEN) {
2445 				len = 0;
2446 				prev_len_adj = ETHER_CRC_LEN - desc_len;
2447 			} else {
2449 				len = desc_len - ETHER_CRC_LEN;
2450 			}
2451 		} else {
2452 			eop = 0;
2453 			len = desc_len;
2454 		}
2455 
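		/*
		 * Frame error handling: on TBI (fiber) links a frame whose
		 * only fault is a carrier-extension error may still be
		 * acceptable; TBI_ACCEPT() decides, em_tbi_adjust_stats()
		 * fixes up the counters, and the trailing byte is stripped.
		 */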
2456 		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2457 			uint8_t last_byte;
2458 			uint32_t pkt_len = desc_len;
2459 
2460 			if (adapter->fmp != NULL)
2461 				pkt_len += adapter->fmp->m_pkthdr.len;
2462 
2463 			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2464 
2465 			if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2466 				       current_desc->errors,
2467 				       pkt_len, last_byte)) {
2468 				em_tbi_adjust_stats(&adapter->hw,
2469 						    &adapter->stats,
2470 						    pkt_len,
2471 						    adapter->hw.mac_addr);
2472 				if (len > 0)
2473 					len--;
2474 			} else {
2476 				accept_frame = 0;
2477 			}
2478 		}
2479 
2480 		if (accept_frame) {
2481 			if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
2482 				adapter->dropped_pkts++;
2483 				em_get_buf(i, adapter, mp, MB_DONTWAIT);
2484 				if (adapter->fmp != NULL)
2485 					m_freem(adapter->fmp);
2486 				adapter->fmp = NULL;
2487 				adapter->lmp = NULL;
2488 				break;
2489 			}
2490 
2491 			/* Assign correct length to the current fragment */
2492 			mp->m_len = len;
2493 
2494 			if (adapter->fmp == NULL) {
2495 				mp->m_pkthdr.len = len;
2496 				adapter->fmp = mp;	 /* Store the first mbuf */
2497 				adapter->lmp = mp;
2498 			} else {
2499 				/* Chain mbufs together */
2500 				mp->m_flags &= ~M_PKTHDR;
2501 				/*
2502 				 * Adjust length of previous mbuf in chain if we
2503 				 * received less than 4 bytes in the last descriptor.
2504 				 */
2505 				if (prev_len_adj > 0) {
2506 					adapter->lmp->m_len -= prev_len_adj;
2507 					adapter->fmp->m_pkthdr.len -= prev_len_adj;
2508 				}
2509 				adapter->lmp->m_next = mp;
2510 				adapter->lmp = adapter->lmp->m_next;
2511 				adapter->fmp->m_pkthdr.len += len;
2512 			}
2513 
2514 			if (eop) {
2515 				adapter->fmp->m_pkthdr.rcvif = ifp;
2516 				ifp->if_ipackets++;
2517 
2518 				em_receive_checksum(adapter, current_desc,
2519 						    adapter->fmp);
2520 				if (current_desc->status & E1000_RXD_STAT_VP)
2521 					VLAN_INPUT_TAG(adapter->fmp,
2522 						       (current_desc->special &
2523 							E1000_RXD_SPC_VLAN_MASK));
2524 				else
2525 					(*ifp->if_input)(ifp, adapter->fmp);
2526 				adapter->fmp = NULL;
2527 				adapter->lmp = NULL;
2528 			}
2529 		} else {
2530 			adapter->dropped_pkts++;
2531 			em_get_buf(i, adapter, mp, MB_DONTWAIT);
2532 			if (adapter->fmp != NULL)
2533 				m_freem(adapter->fmp);
2534 			adapter->fmp = NULL;
2535 			adapter->lmp = NULL;
2536 		}
2537 
2538 		/* Zero out the receive descriptor's status */
2539 		current_desc->status = 0;
2540 
2541 		/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2542 		E1000_WRITE_REG(&adapter->hw, RDT, i);
2543 
2544 		/* Advance our pointers to the next descriptor */
2545 		if (++i == adapter->num_rx_desc) {
2546 			i = 0;
2547 			current_desc = adapter->rx_desc_base;
2548 		} else
2549 			current_desc++;
2550 	}
2551 	adapter->next_rx_desc_to_check = i;
2552 }
2553 
2554 /*********************************************************************
2555  *
2556  *  Verify that the hardware indicated that the checksum is valid.
2557  *  Inform the stack about the status of the checksum so that the
2558  *  stack doesn't spend time verifying it.
2559  *
2560  *********************************************************************/
2561 static void
2562 em_receive_checksum(struct adapter *adapter,
2563 		    struct em_rx_desc *rx_desc,
2564 		    struct mbuf *mp)
2565 {
2566 	/* 82543 or newer only */
2567 	if ((adapter->hw.mac_type < em_82543) ||
2568 	    /* Ignore Checksum bit is set */
2569 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2570 		mp->m_pkthdr.csum_flags = 0;
2571 		return;
2572 	}
2573 
2574 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2575 		/* Did it pass? */
2576 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2577 			/* IP Checksum Good */
2578 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2579 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2580 		} else {
2581 			mp->m_pkthdr.csum_flags = 0;
2582 		}
2583 	}
2584 
2585 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2586 		/* Did it pass? */
2587 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2588 			mp->m_pkthdr.csum_flags |=
2589 			(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2590 			mp->m_pkthdr.csum_data = htons(0xffff);
2591 		}
2592 	}
2593 }
2594 
2595 
2596 static void
2597 em_enable_vlans(struct adapter *adapter)
2598 {
2599 	uint32_t ctrl;
2600 
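	/* Program the VLAN ethertype register and enable VLAN mode (CTRL.VME). */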
2601 	E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2602 
2603 	ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2604 	ctrl |= E1000_CTRL_VME;
2605 	E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2606 }
2607 
2608 static void
2609 em_enable_intr(struct adapter *adapter)
2610 {
2611 	E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2612 }
2613 
2614 static void
2615 em_disable_intr(struct adapter *adapter)
2616 {
2617 	E1000_WRITE_REG(&adapter->hw, IMC,
2618 			(0xffffffff & ~E1000_IMC_RXSEQ));
2619 }
2620 
2621 static int
2622 em_is_valid_ether_addr(uint8_t *addr)
2623 {
2624 	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2625 
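	/* Reject multicast/broadcast addresses (I/G bit set) and the all-zero address. */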
2626 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
2627 		return(FALSE);
2628 	else
2629 		return(TRUE);
2630 }
2631 
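/*
 * OS-dependent helpers used by the shared hardware-layer code to access
 * PCI configuration space and to toggle Memory Write and Invalidate.
 */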
2632 void
2633 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2634 {
2635 	pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
2636 }
2637 
2638 void
2639 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2640 {
2641 	*value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
2642 }
2643 
2644 void
2645 em_pci_set_mwi(struct em_hw *hw)
2646 {
2647 	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2648 			 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
2649 }
2650 
2651 void
2652 em_pci_clear_mwi(struct em_hw *hw)
2653 {
2654 	pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2655 			 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
2656 }
2657 
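/*
 * Indirect register access through the I/O-mapped BAR: the register
 * offset is written to the address window at offset 0, then the data is
 * read or written through the data window at offset 4.
 */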
2658 uint32_t
2659 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2660 {
2661 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2662 	return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
2663 }
2664 
2665 void
2666 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2667 {
2668 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2669 	bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2670 }
2671 
2672 /*********************************************************************
2673  * 82544 Coexistence issue workaround.
2674  *    There are 2 issues.
2675  *	1. Transmit Hang issue.
2676  *    To detect this issue, the following equation can be used:
2677  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2678  *          If SUM[3:0] is between 1 and 4, we will have this issue.
2679  *
2680  *	2. DAC issue.
2681  *    To detect this issue, the following equation can be used:
2682  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2683  *          If SUM[3:0] is between 9 and c, we will have this issue.
2684  *
2685  *
2686  *    WORKAROUND:
2687  *          Make sure the ending address is not 1, 2, 3, or 4 (Hang)
2688  *          or 9, a, b, or c (DAC).
2689  *
2690  *********************************************************************/
2691 static uint32_t
2692 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2693 {
2694 	/* The issue is sensitive to both the length and the address. */
2695 	/* Handle short buffers first, then check the terminating alignment. */
2696 	uint32_t safe_terminator;
2697 	if (length <= 4) {
2698 		desc_array->descriptor[0].address = address;
2699 		desc_array->descriptor[0].length = length;
2700 		desc_array->elements = 1;
2701 		return(desc_array->elements);
2702 	}
2703 	safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2704 	/* If it does not fall within 0x1-0x4 or 0x9-0xC, a single descriptor is safe. */
2705 	if (safe_terminator == 0 ||
2706 	    (safe_terminator > 4 && safe_terminator < 9) ||
2707 	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2708 		desc_array->descriptor[0].address = address;
2709 		desc_array->descriptor[0].length = length;
2710 		desc_array->elements = 1;
2711 		return(desc_array->elements);
2712 	}
2713 
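	/*
	 * Unsafe ending alignment: split the buffer so the final descriptor
	 * carries exactly the last 4 bytes, moving the ending address out of
	 * the problematic range.
	 */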
2714 	desc_array->descriptor[0].address = address;
2715 	desc_array->descriptor[0].length = length - 4;
2716 	desc_array->descriptor[1].address = address + (length - 4);
2717 	desc_array->descriptor[1].length = 4;
2718 	desc_array->elements = 2;
2719 	return(desc_array->elements);
2720 }
2721 
2722 /**********************************************************************
2723  *
2724  *  Update the board statistics counters.
2725  *
2726  **********************************************************************/
2727 static void
2728 em_update_stats_counters(struct adapter *adapter)
2729 {
2730 	struct ifnet   *ifp;
2731 
2732 	if (adapter->hw.media_type == em_media_type_copper ||
2733 	    (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2734 		adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2735 		adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2736 	}
2737 	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2738 	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2739 	adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2740 	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2741 
2742 	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2743 	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2744 	adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2745 	adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2746 	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2747 	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2748 	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2749 	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2750 	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2751 	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2752 	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2753 	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2754 	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2755 	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2756 	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2757 	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2758 	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2759 	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2760 	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2761 	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2762 
2763 	/* For the 64-bit byte counters the low dword must be read first. */
2764 	/* Both registers clear on the read of the high dword */
2765 
2766 	adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
2767 	adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2768 	adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2769 	adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2770 
2771 	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2772 	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2773 	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2774 	adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2775 	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2776 
2777 	adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2778 	adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2779 	adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2780 	adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2781 
2782 	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2783 	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2784 	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2785 	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2786 	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2787 	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2788 	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2789 	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2790 	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2791 	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
2792 
2793 	if (adapter->hw.mac_type >= em_82543) {
2794 		adapter->stats.algnerrc +=
2795 		    E1000_READ_REG(&adapter->hw, ALGNERRC);
2796 		adapter->stats.rxerrc +=
2797 		    E1000_READ_REG(&adapter->hw, RXERRC);
2798 		adapter->stats.tncrs +=
2799 		    E1000_READ_REG(&adapter->hw, TNCRS);
2800 		adapter->stats.cexterr +=
2801 		    E1000_READ_REG(&adapter->hw, CEXTERR);
2802 		adapter->stats.tsctc +=
2803 		    E1000_READ_REG(&adapter->hw, TSCTC);
2804 		adapter->stats.tsctfc +=
2805 		    E1000_READ_REG(&adapter->hw, TSCTFC);
2806 	}
2807 	ifp = &adapter->interface_data.ac_if;
2808 
2809 	/* Fill out the OS statistics structure */
2810 	ifp->if_ibytes = adapter->stats.gorcl;
2811 	ifp->if_obytes = adapter->stats.gotcl;
2812 	ifp->if_imcasts = adapter->stats.mprc;
2813 	ifp->if_collisions = adapter->stats.colc;
2814 
2815 	/* Rx Errors */
2816 	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
2817 	    adapter->stats.crcerrs + adapter->stats.algnerrc +
2818 	    adapter->stats.rlec + adapter->stats.rnbc +
2819 	    adapter->stats.mpc + adapter->stats.cexterr;
2820 
2821 	/* Tx Errors */
2822 	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
2823 }
2824 
2825 
2826 /**********************************************************************
2827  *
2828  *  This routine is called only when em_display_debug_stats is enabled.
2829  *  This routine provides a way to take a look at important statistics
2830  *  maintained by the driver and hardware.
2831  *
2832  **********************************************************************/
2833 static void
2834 em_print_debug_info(struct adapter *adapter)
2835 {
2836 	device_t dev = adapter->dev;
2837 	uint8_t *hw_addr = adapter->hw.hw_addr;
2838 
2839 	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
2840 	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
2841 		      E1000_READ_REG(&adapter->hw, TIDV),
2842 		      E1000_READ_REG(&adapter->hw, TADV));
2843 	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
2844 		      E1000_READ_REG(&adapter->hw, RDTR),
2845 		      E1000_READ_REG(&adapter->hw, RADV));
2846 #ifdef DBG_STATS
2847 	device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail);
2848 	device_printf(dev, "CleanTxInterrupts = %ld\n",
2849 		      adapter->clean_tx_interrupts);
2850 #endif
2851 	device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
2852 		      (long long)adapter->tx_fifo_wrk,
2853 		      (long long)adapter->tx_fifo_reset);
2854 	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
2855 		      E1000_READ_REG(&adapter->hw, TDH),
2856 		      E1000_READ_REG(&adapter->hw, TDT));
2857 	device_printf(dev, "Num Tx descriptors avail = %d\n",
2858 		      adapter->num_tx_desc_avail);
2859 	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
2860 		      adapter->no_tx_desc_avail1);
2861 	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
2862 		      adapter->no_tx_desc_avail2);
2863 	device_printf(dev, "Std mbuf failed = %ld\n",
2864 		      adapter->mbuf_alloc_failed);
2865 	device_printf(dev, "Std mbuf cluster failed = %ld\n",
2866 		      adapter->mbuf_cluster_failed);
2867 	device_printf(dev, "Driver dropped packets = %ld\n",
2868 		      adapter->dropped_pkts);
2869 }
2870 
2871 static void
2872 em_print_hw_stats(struct adapter *adapter)
2873 {
2874 	device_t dev = adapter->dev;
2875 
2876 	device_printf(dev, "Adapter: %p\n", adapter);
2877 
2878 	device_printf(dev, "Excessive collisions = %lld\n",
2879 		      (long long)adapter->stats.ecol);
2880 	device_printf(dev, "Symbol errors = %lld\n",
2881 		      (long long)adapter->stats.symerrs);
2882 	device_printf(dev, "Sequence errors = %lld\n",
2883 		      (long long)adapter->stats.sec);
2884 	device_printf(dev, "Defer count = %lld\n",
2885 		      (long long)adapter->stats.dc);
2886 
2887 	device_printf(dev, "Missed Packets = %lld\n",
2888 		      (long long)adapter->stats.mpc);
2889 	device_printf(dev, "Receive No Buffers = %lld\n",
2890 		      (long long)adapter->stats.rnbc);
2891 	device_printf(dev, "Receive length errors = %lld\n",
2892 		      (long long)adapter->stats.rlec);
2893 	device_printf(dev, "Receive errors = %lld\n",
2894 		      (long long)adapter->stats.rxerrc);
2895 	device_printf(dev, "Crc errors = %lld\n",
2896 		      (long long)adapter->stats.crcerrs);
2897 	device_printf(dev, "Alignment errors = %lld\n",
2898 		      (long long)adapter->stats.algnerrc);
2899 	device_printf(dev, "Carrier extension errors = %lld\n",
2900 		      (long long)adapter->stats.cexterr);
2901 
2902 	device_printf(dev, "XON Rcvd = %lld\n",
2903 		      (long long)adapter->stats.xonrxc);
2904 	device_printf(dev, "XON Xmtd = %lld\n",
2905 		      (long long)adapter->stats.xontxc);
2906 	device_printf(dev, "XOFF Rcvd = %lld\n",
2907 		      (long long)adapter->stats.xoffrxc);
2908 	device_printf(dev, "XOFF Xmtd = %lld\n",
2909 		      (long long)adapter->stats.xofftxc);
2910 
2911 	device_printf(dev, "Good Packets Rcvd = %lld\n",
2912 		      (long long)adapter->stats.gprc);
2913 	device_printf(dev, "Good Packets Xmtd = %lld\n",
2914 		      (long long)adapter->stats.gptc);
2915 }
2916 
2917 static int
2918 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
2919 {
2920 	int error;
2921 	int result;
2922 	struct adapter *adapter;
2923 
2924 	result = -1;
2925 	error = sysctl_handle_int(oidp, &result, 0, req);
2926 
2927 	if (error || !req->newptr)
2928 		return(error);
2929 
2930 	if (result == 1) {
2931 		adapter = (struct adapter *)arg1;
2932 		em_print_debug_info(adapter);
2933 	}
2934 
2935 	return(error);
2936 }
2937 
2938 static int
2939 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
2940 {
2941 	int error;
2942 	int result;
2943 	struct adapter *adapter;
2944 
2945 	result = -1;
2946 	error = sysctl_handle_int(oidp, &result, 0, req);
2947 
2948 	if (error || !req->newptr)
2949 		return(error);
2950 
2951 	if (result == 1) {
2952 		adapter = (struct adapter *)arg1;
2953 		em_print_hw_stats(adapter);
2954 	}
2955 
2956 	return(error);
2957 }
2958 
2959 static int
2960 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
2961 {
2962 	struct em_int_delay_info *info;
2963 	struct adapter *adapter;
2964 	uint32_t regval;
2965 	int error;
2966 	int usecs;
2967 	int ticks;
2968 	int s;
2969 
2970 	info = (struct em_int_delay_info *)arg1;
2971 	adapter = info->adapter;
2972 	usecs = info->value;
2973 	error = sysctl_handle_int(oidp, &usecs, 0, req);
2974 	if (error != 0 || req->newptr == NULL)
2975 		return(error);
2976 	if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
2977 		return(EINVAL);
2978 	info->value = usecs;
2979 	ticks = E1000_USECS_TO_TICKS(usecs);
2980 
2981 	s = splimp();
2982 	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
2983 	regval = (regval & ~0xffff) | (ticks & 0xffff);
2984 	/* Handle a few special cases. */
2985 	switch (info->offset) {
2986 	case E1000_RDTR:
2987 	case E1000_82542_RDTR:
2988 		regval |= E1000_RDT_FPDB;
2989 		break;
2990 	case E1000_TIDV:
2991 	case E1000_82542_TIDV:
2992 		if (ticks == 0) {
2993 			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
2994 			/* Don't write 0 into the TIDV register. */
2995 			regval++;
2996 		} else
2997 			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2998 		break;
2999 	}
3000 	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3001 	splx(s);
3002 	return(0);
3003 }
3004 
3005 static void
3006 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3007 			const char *description, struct em_int_delay_info *info,
3008 			int offset, int value)
3009 {
3010 	info->adapter = adapter;
3011 	info->offset = offset;
3012 	info->value = value;
3013 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3014 			SYSCTL_CHILDREN(adapter->sysctl_tree),
3015 			OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3016 			info, 0, em_sysctl_int_delay, "I", description);
3017 }
3018 
3019 static int
3020 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3021 {
3022 	struct adapter *adapter = (void *)arg1;
3023 	int error;
3024 	int throttle;
3025 
3026 	throttle = em_int_throttle_ceil;
3027 	error = sysctl_handle_int(oidp, &throttle, 0, req);
3028 	if (error || req->newptr == NULL)
3029 		return error;
3030 	if (throttle < 0 || throttle > 1000000000 / 256)
3031 		return EINVAL;
3032 	if (throttle) {
3033 		/*
3034 		 * Set the interrupt throttling rate in 256ns increments and
3035 		 * recompute the sysctl value to reflect the exact rate programmed.
3036 		 */
3037 		throttle = 1000000000 / 256 / throttle;
3038 		em_int_throttle_ceil = 1000000000 / 256 / throttle;
3039 		crit_enter();
3040 		E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3041 		crit_exit();
3042 	} else {
3043 		em_int_throttle_ceil = 0;
3044 		crit_enter();
3045 		E1000_WRITE_REG(&adapter->hw, ITR, 0);
3046 		crit_exit();
3047 	}
3048 	device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n",
3049 			em_int_throttle_ceil);
3050 	return 0;
3051 }
3052 
3053