/**************************************************************************

Copyright (c) 2001-2003, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* $OpenBSD: if_em.c,v 1.264 2012/05/17 10:45:17 jsg Exp $ */
/* $FreeBSD: if_em.c,v 1.46 2004/09/29 18:28:28 mlaier Exp $ */

#include <dev/pci/if_em.h>
#include <dev/pci/if_em_soc.h>

/*********************************************************************
 *  Driver version
 *********************************************************************/

#define EM_DRIVER_VERSION	"6.2.9"

/*********************************************************************
 *  PCI Device ID Table
 *********************************************************************/
const struct pci_matchid em_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_DPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_DPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_SPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_SPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR_K },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR_LP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FBR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573V_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QP_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_CU_ET2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_82567V_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_G },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_GT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_C },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_BM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_G },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_GT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_C },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_3 }
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
int  em_probe(struct device *, void *, void *);
void em_attach(struct device *, struct device *, void *);
void em_defer_attach(struct device*);
int  em_detach(struct device *, int);
int  em_activate(struct device *, int);
int  em_intr(void *);
void em_start(struct ifnet *);
int  em_ioctl(struct ifnet *, u_long, caddr_t);
void em_watchdog(struct ifnet *);
void em_init(void *);
void em_stop(void *, int);
void em_media_status(struct ifnet *, struct ifmediareq *);
int  em_media_change(struct ifnet *);
int  em_flowstatus(struct em_softc *);
void em_identify_hardware(struct em_softc *);
int  em_allocate_pci_resources(struct em_softc *);
void em_free_pci_resources(struct em_softc *);
void em_local_timer(void *);
int  em_hardware_init(struct em_softc *);
void em_setup_interface(struct em_softc *);
int  em_setup_transmit_structures(struct em_softc *);
void em_initialize_transmit_unit(struct em_softc *);
int  em_setup_receive_structures(struct em_softc *);
void em_initialize_receive_unit(struct em_softc *);
void em_enable_intr(struct em_softc *);
void em_disable_intr(struct em_softc *);
void em_free_transmit_structures(struct em_softc *);
void em_free_receive_structures(struct em_softc *);
void em_update_stats_counters(struct em_softc *);
void em_txeof(struct em_softc *);
int  em_allocate_receive_structures(struct em_softc *);
int  em_allocate_transmit_structures(struct em_softc *);
#ifdef __STRICT_ALIGNMENT
void em_realign(struct em_softc *, struct mbuf *, u_int16_t *);
#else
#define em_realign(a, b, c) /* a, b, c */
#endif
int  em_rxfill(struct em_softc *);
void em_rxeof(struct em_softc *, int);
void em_receive_checksum(struct em_softc *, struct em_rx_desc *,
			 struct mbuf *);
#ifdef EM_CSUM_OFFLOAD
void em_transmit_checksum_setup(struct em_softc *, struct mbuf *,
				u_int32_t *, u_int32_t *);
#endif
void em_iff(struct em_softc *);
#ifdef EM_DEBUG
void em_print_hw_stats(struct em_softc *);
#endif
void em_update_link_status(struct em_softc *);
int  em_get_buf(struct em_softc *, int);
void em_enable_hw_vlans(struct em_softc *);
int  em_encap(struct em_softc *, struct mbuf *);
void em_smartspeed(struct em_softc *);
int  em_82547_fifo_workaround(struct em_softc *, int);
void em_82547_update_fifo_head(struct em_softc *, int);
int  em_82547_tx_fifo_reset(struct em_softc *);
void em_82547_move_tail(void *arg);
void em_82547_move_tail_locked(struct em_softc *);
int  em_dma_malloc(struct em_softc *, bus_size_t, struct em_dma_alloc *,
		   int);
void em_dma_free(struct em_softc *, struct em_dma_alloc *);
int  em_is_valid_ether_addr(u_int8_t *);
u_int32_t em_fill_descriptors(u_int64_t address, u_int32_t length,
			      PDESC_ARRAY desc_array);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

struct cfattach em_ca = {
	sizeof(struct em_softc), em_probe, em_attach, em_detach,
	em_activate
};

struct cfdriver em_cd = {
	NULL, "em", DV_IFNET
};

static int em_smart_pwr_down = FALSE;

/*********************************************************************
 *  Device identification routine
 *
 *  em_probe determines if the driver should be loaded on the
 *  adapter based on the PCI vendor/device ID of the adapter.
 *
 *  return 0 on no match, positive on match
 *********************************************************************/

int
em_probe(struct device *parent, void *match, void *aux)
{
	INIT_DEBUGOUT("em_probe: begin");

	return (pci_matchbyid((struct pci_attach_args *)aux, em_devices,
	    nitems(em_devices)));
}

void
em_defer_attach(struct device *self)
{
	struct em_softc *sc = (struct em_softc *)self;
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;
	void *gcu;

	if ((gcu = em_lookup_gcu(self)) == 0) {
		printf("%s: No GCU found, deferred attachment failed\n",
		    sc->sc_dv.dv_xname);

		if (sc->sc_intrhand)
			pci_intr_disestablish(pc, sc->sc_intrhand);
		sc->sc_intrhand = 0;

		em_stop(sc, 1);

		em_free_pci_resources(sc);
		em_dma_free(sc, &sc->rxdma);
		em_dma_free(sc, &sc->txdma);

		return;
	}

	sc->hw.gcu = gcu;

	em_attach_miibus(self);

	em_setup_interface(sc);

	em_update_link_status(sc);

	em_setup_link(&sc->hw);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *********************************************************************/

void
em_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct em_softc *sc;
	int tsize, rsize;
	int defer = 0;

	INIT_DEBUGOUT("em_attach: begin");

	sc = (struct em_softc *)self;
	sc->osdep.em_pa = *pa;

	timeout_set(&sc->timer_handle, em_local_timer, sc);
	timeout_set(&sc->tx_fifo_timer_handle, em_82547_move_tail, sc);

	/* Determine hardware revision */
	em_identify_hardware(sc);

	/*
	 * Only use MSI on the newer PCIe parts, with the exception
	 * of 82571/82572 due to "Byte Enables 2 and 3 Are Not Set" errata
	 */
	if (sc->hw.mac_type <= em_82572)
		sc->osdep.em_pa.pa_flags &= ~PCI_FLAGS_MSI_ENABLED;

	/* Parameters (to be read from user) */
	if (sc->hw.mac_type >= em_82544) {
		sc->num_tx_desc = EM_MAX_TXD;
		sc->num_rx_desc = EM_MAX_RXD;
	} else {
		sc->num_tx_desc = EM_MAX_TXD_82543;
		sc->num_rx_desc = EM_MAX_RXD_82543;
	}
	sc->tx_int_delay = EM_TIDV;
	sc->tx_abs_int_delay = EM_TADV;
	sc->rx_int_delay = EM_RDTR;
	sc->rx_abs_int_delay = EM_RADV;
	sc->hw.autoneg = DO_AUTO_NEG;
	sc->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	sc->hw.tbi_compatibility_en = TRUE;
	sc->rx_buffer_len = EM_RXBUFFER_2048;

	sc->hw.phy_init_script = 1;
	sc->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	sc->hw.master_slave = em_ms_hw_default;
#else
	sc->hw.master_slave = EM_MASTER_SLAVE;
#endif

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	sc->hw.report_tx_early = 1;
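	/*
	 * Note: a value of 1 here (roughly, per the shared em_hw
	 * code) asks the MAC to report completion once a packet has
	 * been fetched into the Tx FIFO rather than once it is on
	 * the wire, so descriptors are reclaimed sooner.
	 */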

	if (em_allocate_pci_resources(sc))
		goto err_pci;

	/* Initialize eeprom parameters */
	em_init_eeprom_params(&sc->hw);

	/*
	 * Set the max frame size assuming standard Ethernet
	 * sized frames.
	 */
	switch (sc->hw.mac_type) {
		case em_82573:
		{
			uint16_t	eeprom_data = 0;

			/*
			 * 82573 only supports Jumbo frames
			 * if ASPM is disabled.
			 */
			em_read_eeprom(&sc->hw, EEPROM_INIT_3GIO_3,
			    1, &eeprom_data);
			if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
				sc->hw.max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames */
			/* FALLTHROUGH */
		}
		case em_82571:
		case em_82572:
		case em_82574:
		case em_82575:
		case em_82580:
		case em_i350:
		case em_ich9lan:
		case em_ich10lan:
		case em_80003es2lan:
			/* Limit Jumbo Frame size */
			sc->hw.max_frame_size = 9234;
			break;
		case em_pchlan:
			sc->hw.max_frame_size = 4096;
			break;
		case em_82542_rev2_0:
		case em_82542_rev2_1:
		case em_ich8lan:
			/* Adapters that do not support Jumbo frames */
			sc->hw.max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			sc->hw.max_frame_size =
			    MAX_JUMBO_FRAME_SIZE;
	}

	sc->hw.min_frame_size =
	    ETHER_MIN_LEN + ETHER_CRC_LEN;

	if (sc->hw.mac_type >= em_82544)
	    tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
		EM_MAX_TXD * sizeof(struct em_tx_desc));
	else
	    tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
		EM_MAX_TXD_82543 * sizeof(struct em_tx_desc));
	tsize = EM_ROUNDUP(tsize, PAGE_SIZE);
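	/*
	 * Illustrative arithmetic (example figures only): legacy
	 * descriptors are 16 bytes, so a ring of, say, 256 of them
	 * needs 4096 bytes, and the final EM_ROUNDUP pads the total
	 * to a whole number of PAGE_SIZE pages.
	 */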

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate tx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_tx_desc;
	}
	sc->tx_desc_base = (struct em_tx_desc *)sc->txdma.dma_vaddr;

	if (sc->hw.mac_type >= em_82544)
	    rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
		EM_MAX_RXD * sizeof(struct em_rx_desc));
	else
	    rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
		EM_MAX_RXD_82543 * sizeof(struct em_rx_desc));
	rsize = EM_ROUNDUP(rsize, PAGE_SIZE);

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_rx_desc;
	}
	sc->rx_desc_base = (struct em_rx_desc *) sc->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if ((defer = em_hardware_init(sc))) {
		if (defer == EAGAIN)
			config_defer(self, em_defer_attach);
		else {
			printf("%s: Unable to initialize the hardware\n",
			    sc->sc_dv.dv_xname);
			goto err_hw_init;
		}
	}

	if (sc->hw.mac_type == em_80003es2lan || sc->hw.mac_type == em_82575 ||
	    sc->hw.mac_type == em_82580 || sc->hw.mac_type == em_i350) {
		uint32_t reg = EM_READ_REG(&sc->hw, E1000_STATUS);
		sc->hw.bus_func = (reg & E1000_STATUS_FUNC_MASK) >>
		    E1000_STATUS_FUNC_SHIFT;

		switch (sc->hw.bus_func) {
		case 0:
			sc->hw.swfw = E1000_SWFW_PHY0_SM;
			break;
		case 1:
			sc->hw.swfw = E1000_SWFW_PHY1_SM;
			break;
		case 2:
			sc->hw.swfw = E1000_SWFW_PHY2_SM;
			break;
		case 3:
			sc->hw.swfw = E1000_SWFW_PHY3_SM;
			break;
		}
	} else {
		sc->hw.bus_func = 0;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&sc->hw) < 0) {
		printf("%s: EEPROM read error while reading mac address\n",
		       sc->sc_dv.dv_xname);
		goto err_mac_addr;
	}

	if (!em_is_valid_ether_addr(sc->hw.mac_addr)) {
		printf("%s: Invalid mac address\n", sc->sc_dv.dv_xname);
		goto err_mac_addr;
	}

	bcopy(sc->hw.mac_addr, sc->interface_data.ac_enaddr,
	    ETHER_ADDR_LEN);

	/* Setup OS specific network interface */
	if (!defer)
		em_setup_interface(sc);

	/* Initialize statistics */
	em_clear_hw_cntrs(&sc->hw);
#ifndef SMALL_KERNEL
	em_update_stats_counters(sc);
#endif
	sc->hw.get_link_status = 1;
	if (!defer)
		em_update_link_status(sc);

	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

	/* Indicate SOL/IDER usage */
	if (em_check_phy_reset_block(&sc->hw))
		printf("%s: PHY reset is blocked due to SOL/IDER session.\n",
		    sc->sc_dv.dv_xname);

	/* Identify 82544 on PCI-X */
	em_get_bus_info(&sc->hw);
	if (sc->hw.bus_type == em_bus_type_pcix &&
	    sc->hw.mac_type == em_82544)
		sc->pcix_82544 = TRUE;
	else
		sc->pcix_82544 = FALSE;

	sc->hw.icp_xxxx_is_link_up = FALSE;

	INIT_DEBUGOUT("em_attach: end");
	return;

err_mac_addr:
err_hw_init:
	em_dma_free(sc, &sc->rxdma);
err_rx_desc:
	em_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
	em_free_pci_resources(sc);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  em_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

void
em_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	struct em_softc *sc = ifp->if_softc;
	int		post = 0;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	if (!sc->link_active)
		return;

	if (sc->hw.mac_type != em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (em_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
		/* Send a copy of the frame to the BPF listener */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = EM_TX_TIMEOUT;

		post = 1;
	}

	if (sc->hw.mac_type != em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Advance the Transmit Descriptor Tail (TDT); this
		 * tells the E1000 that this frame is available to
		 * transmit.
		 */
		if (post)
			E1000_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int		error = 0;
	struct ifreq   *ifr = (struct ifreq *) data;
	struct ifaddr  *ifa = (struct ifaddr *)data;
	struct em_softc *sc = ifp->if_softc;
	int s;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
			       "Addr)");
		if (!(ifp->if_flags & IFF_UP)) {
			ifp->if_flags |= IFF_UP;
			em_init(sc);
		}
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->interface_data, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				em_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				em_stop(sc, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (em_check_phy_reset_block(&sc->hw)) {
			printf("%s: Media change is blocked due to SOL/IDER session.\n",
			    sc->sc_dv.dv_xname);
			break;
		}
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->interface_data, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			em_disable_intr(sc);
			em_iff(sc);
			if (sc->hw.mac_type == em_82542_rev2_0)
				em_initialize_receive_unit(sc);
			em_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

void
em_watchdog(struct ifnet *ifp)
{
	struct em_softc *sc = ifp->if_softc;

	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		return;
	}
	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);

	em_init(sc);

	sc->watchdog_events++;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. The stack uses it as the
 *  init entry point in the network interface structure. The
 *  driver also uses it as a hw/sw initialization routine to get
 *  to a consistent state.
 *
 **********************************************************************/

void
em_init(void *arg)
{
	struct em_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	uint32_t	pba;
	int s;

	s = splnet();

	INIT_DEBUGOUT("em_init: begin");

	em_stop(sc, 0);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
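	/*
	 * Example: an adapter taking the default branch below with
	 * max_frame_size <= EM_RXBUFFER_8192 gets pba = E1000_PBA_48K,
	 * i.e. 48K of its 64K packet buffer for Rx and 16K for Tx.
	 */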
	switch (sc->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		sc->tx_fifo_head = 0;
		sc->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		sc->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571:
	case em_82572: /* Total Packet Buffer on these is 48k */
	case em_82575:
	case em_82580:
	case em_80003es2lan:
	case em_i350:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case em_82574: /* Total Packet Buffer is 40k */
		pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		break;
	case em_ich8lan:
		pba = E1000_PBA_8K;
		break;
	case em_ich9lan:
	case em_ich10lan:
	case em_pchlan:
		pba = E1000_PBA_10K;
		break;
	case em_pch2lan:
		pba = E1000_PBA_26K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
	E1000_WRITE_REG(&sc->hw, PBA, pba);

	/* Get the latest MAC address; the user may have set a LAA */
	bcopy(sc->interface_data.ac_enaddr, sc->hw.mac_addr,
	      ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		splx(s);
		return;
	}
	em_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, VET, ETHERTYPE_VLAN);
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		em_enable_hw_vlans(sc);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		       sc->sc_dv.dv_xname);
		em_stop(sc, 0);
		splx(s);
		return;
	}
	em_initialize_transmit_unit(sc);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		       sc->sc_dv.dv_xname);
		em_stop(sc, 0);
		splx(s);
		return;
	}
	em_initialize_receive_unit(sc);

	/* Program promiscuous mode and multicast filters. */
	em_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	timeout_add_sec(&sc->timer_handle, 1);
	em_clear_hw_cntrs(&sc->hw);
	em_enable_intr(sc);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy_reset_disable = TRUE;

	splx(s);
}

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/
int
em_intr(void *arg)
{
	struct em_softc	*sc = arg;
	struct ifnet	*ifp = &sc->interface_data.ac_if;
	u_int32_t	reg_icr, test_icr;
	int		refill = 0;

	test_icr = reg_icr = E1000_READ_REG(&sc->hw, ICR);
	if (sc->hw.mac_type >= em_82571)
		test_icr = (reg_icr & E1000_ICR_INT_ASSERTED);
	if (!test_icr)
		return (0);

	if (ifp->if_flags & IFF_RUNNING) {
		em_rxeof(sc, -1);
		em_txeof(sc);
		refill = 1;
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		timeout_del(&sc->timer_handle);
		sc->hw.get_link_status = 1;
		em_check_for_link(&sc->hw);
		em_update_link_status(sc);
		timeout_add_sec(&sc->timer_handle, 1);
	}

	if (reg_icr & E1000_ICR_RXO) {
		sc->rx_overruns++;
		refill = 1;
	}

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		em_start(ifp);

	if (refill && em_rxfill(sc)) {
		/* Advance the Rx Queue #0 "Tail Pointer". */
		E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
	}

	return (1);
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct em_softc *sc = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;
	u_int16_t gsr;

	INIT_DEBUGOUT("em_media_status: begin");

	em_check_for_link(&sc->hw);
	em_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.media_type == em_media_type_fiber ||
	    sc->hw.media_type == em_media_type_internal_serdes) {
		if (sc->hw.mac_type == em_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}

		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= em_flowstatus(sc) | IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_1000_T) {
			em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &gsr);
			if (gsr & SR_1000T_MS_CONFIG_RES)
				ifmr->ifm_active |= IFM_ETH_MASTER;
		}
	}
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options with ifconfig.
 *
 **********************************************************************/
int
em_media_change(struct ifnet *ifp)
{
	struct em_softc *sc = ifp->if_softc;
	struct ifmedia	*ifm = &sc->media;

	INIT_DEBUGOUT("em_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.autoneg = DO_AUTO_NEG;
		sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.autoneg = DO_AUTO_NEG;
		sc->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		sc->hw.autoneg = FALSE;
		sc->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.forced_speed_duplex = em_100_full;
		else
			sc->hw.forced_speed_duplex = em_100_half;
		break;
	case IFM_10_T:
		sc->hw.autoneg = FALSE;
		sc->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.forced_speed_duplex = em_10_full;
		else
			sc->hw.forced_speed_duplex = em_10_half;
		break;
	default:
		printf("%s: Unsupported media type\n", sc->sc_dv.dv_xname);
	}

	/*
	 * As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	sc->hw.phy_reset_disable = FALSE;

	em_init(sc);

	return (0);
}

int
em_flowstatus(struct em_softc *sc)
{
	u_int16_t ar, lpar;

	if (sc->hw.media_type == em_media_type_fiber ||
	    sc->hw.media_type == em_media_type_internal_serdes)
		return (0);

	em_read_phy_reg(&sc->hw, PHY_AUTONEG_ADV, &ar);
	em_read_phy_reg(&sc->hw, PHY_LP_ABILITY, &lpar);

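	/*
	 * The checks below follow the pause resolution rules of IEEE
	 * 802.3 Annex 28B: symmetric PAUSE advertised on both sides
	 * gives flow control in both directions; otherwise the
	 * asymmetric-pause (ASM_DIR) bits decide which single
	 * direction, if any, is enabled.
	 */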
	if ((ar & NWAY_AR_PAUSE) && (lpar & NWAY_LPAR_PAUSE))
		return (IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE);
	else if (!(ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
		(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
		return (IFM_FLOW|IFM_ETH_TXPAUSE);
	else if ((ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
		!(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
		return (IFM_FLOW|IFM_ETH_RXPAUSE);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
int
em_encap(struct em_softc *sc, struct mbuf *m_head)
{
	u_int32_t	txd_upper;
	u_int32_t	txd_lower, txd_used = 0, txd_saved = 0;
	int		i, j, first, error = 0, last = 0;
	bus_dmamap_t	map;

	/* For 82544 Workaround */
	DESC_ARRAY		desc_array;
	u_int32_t		array_elements;
	u_int32_t		counter;

	struct em_buffer   *tx_buffer, *tx_buffer_mapped;
	struct em_tx_desc *current_tx_desc = NULL;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (sc->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_txeof(sc);
		/* Do we now have at least the minimum number available? */
		if (sc->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			sc->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	/*
	 * Map the packet for DMA.
	 *
	 * Capture the first descriptor index; this descriptor
	 * will have the index of the EOP, which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf(sc->txtag, map, m_head, BUS_DMA_NOWAIT);
	if (error != 0) {
		sc->no_tx_dma_setup++;
		goto loaderr;
	}
	EM_KASSERT(map->dm_nsegs != 0, ("em_encap: empty packet"));

	if (map->dm_nsegs > sc->num_tx_desc_avail - 2)
		goto fail;

#ifdef EM_CSUM_OFFLOAD
	if (sc->hw.mac_type >= em_82543)
		em_transmit_checksum_setup(sc, m_head, &txd_upper, &txd_lower);
	else
		txd_upper = txd_lower = 0;
#else
	txd_upper = txd_lower = 0;
#endif

	i = sc->next_avail_tx_desc;
	if (sc->pcix_82544)
		txd_saved = i;

	for (j = 0; j < map->dm_nsegs; j++) {
		/* If sc is 82544 and on PCI-X bus */
		if (sc->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(map->dm_segs[j].ds_addr,
							     map->dm_segs[j].ds_len,
							     &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == sc->num_tx_desc_avail) {
					sc->next_avail_tx_desc = txd_saved;
					goto fail;
				}
				tx_buffer = &sc->tx_buffer_area[i];
				current_tx_desc = &sc->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					(sc->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32((txd_upper));
				last = i;
				if (++i == sc->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			tx_buffer = &sc->tx_buffer_area[i];
			current_tx_desc = &sc->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(map->dm_segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				sc->txd_cmd | txd_lower | map->dm_segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);
			last = i;
			if (++i == sc->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	sc->next_avail_tx_desc = i;
	if (sc->pcix_82544)
		sc->num_tx_desc_avail -= txd_used;
	else
		sc->num_tx_desc_avail -= map->dm_nsegs;

#if NVLAN > 0
	/* Find out if we are in VLAN mode */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the VLAN id */
		current_tx_desc->upper.fields.special =
			htole16(m_head->m_pkthdr.ether_vtag);

		/* Tell hardware to add tag */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}
#endif

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	current_tx_desc->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);

	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &sc->tx_buffer_area[first];
	tx_buffer->next_eop = last;

	/*
	 * Advance the Transmit Descriptor Tail (TDT); this
	 * tells the E1000 that this frame is available to
	 * transmit.
	 */
	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		if (sc->link_duplex == HALF_DUPLEX)
			em_82547_move_tail_locked(sc);
		else {
			E1000_WRITE_REG(&sc->hw, TDT, i);
			em_82547_update_fifo_head(sc, m_head->m_pkthdr.len);
		}
	}

	return (0);

fail:
	sc->no_tx_desc_avail2++;
	bus_dmamap_unload(sc->txtag, map);
	error = ENOBUFS;
loaderr:
	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (error);
}

/*********************************************************************
 *
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 * in this case. We do that only when FIFO is quiescent.
 *
 **********************************************************************/
void
em_82547_move_tail_locked(struct em_softc *sc)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	hw_tdt = E1000_READ_REG(&sc->hw, TDT);
	sw_tdt = sc->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &sc->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == sc->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(sc, length)) {
				sc->tx_fifo_wrk_cnt++;
				timeout_add(&sc->tx_fifo_timer_handle, 1);
				break;
			}
			E1000_WRITE_REG(&sc->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(sc, length);
			length = 0;
		}
	}
}

void
em_82547_move_tail(void *arg)
{
	struct em_softc *sc = arg;
	int s;

	s = splnet();
	em_82547_move_tail_locked(sc);
	splx(s);
}

int
em_82547_fifo_workaround(struct em_softc *sc, int len)
{
	int fifo_space, fifo_pkt_len;

	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (sc->link_duplex == HALF_DUPLEX) {
		fifo_space = sc->tx_fifo_size - sc->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(sc))
				return (0);
			else
				return (1);
		}
	}

	return (0);
}

void
em_82547_update_fifo_head(struct em_softc *sc, int len)
{
	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

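	/*
	 * e.g. a 1000-byte frame plus the 16-byte FIFO header
	 * (EM_FIFO_HDR) rounds up to 1024 bytes of FIFO space
	 * consumed, so the head advances by 1024 below.
	 */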
	/* tx_fifo_head is always 16 byte aligned */
	sc->tx_fifo_head += fifo_pkt_len;
	if (sc->tx_fifo_head >= sc->tx_fifo_size)
		sc->tx_fifo_head -= sc->tx_fifo_size;
}

int
em_82547_tx_fifo_reset(struct em_softc *sc)
{
	uint32_t tctl;

	if ((E1000_READ_REG(&sc->hw, TDT) ==
	     E1000_READ_REG(&sc->hw, TDH)) &&
	    (E1000_READ_REG(&sc->hw, TDFT) ==
	     E1000_READ_REG(&sc->hw, TDFH)) &&
	    (E1000_READ_REG(&sc->hw, TDFTS) ==
	     E1000_READ_REG(&sc->hw, TDFHS)) &&
	    (E1000_READ_REG(&sc->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&sc->hw, TCTL);
		E1000_WRITE_REG(&sc->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&sc->hw, TDFT, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFH, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFTS, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFHS, sc->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&sc->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&sc->hw);

		sc->tx_fifo_head = 0;
		sc->tx_fifo_reset_cnt++;

		return (TRUE);
	} else
		return (FALSE);
}

void
em_iff(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;
	struct arpcom *ac = &sc->interface_data;
	u_int32_t reg_rctl = 0;
	u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	struct ether_multi *enm;
	struct ether_multistep step;
	int i = 0;

	IOCTL_DEBUGOUT("em_iff: begin");

	if (sc->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_clear_mwi(&sc->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
	reg_rctl &= ~(E1000_RCTL_MPE | E1000_RCTL_UPE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg_rctl |= E1000_RCTL_MPE;
		if (ifp->if_flags & IFF_PROMISC)
			reg_rctl |= E1000_RCTL_UPE;
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, mta + i, ETH_LENGTH_OF_ADDRESS);
			i += ETH_LENGTH_OF_ADDRESS;

			ETHER_NEXT_MULTI(step, enm);
		}

		em_mc_addr_list_update(&sc->hw, mta, ac->ac_multicnt, 0, 1);
	}

	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);

	if (sc->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_set_mwi(&sc->hw);
	}
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

void
em_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct em_softc *sc = arg;
	int s;

	ifp = &sc->interface_data.ac_if;

	s = splnet();

	em_check_for_link(&sc->hw);
	em_update_link_status(sc);
#ifndef SMALL_KERNEL
	em_update_stats_counters(sc);
#ifdef EM_DEBUG
	if (ifp->if_flags & IFF_DEBUG && ifp->if_flags & IFF_RUNNING)
		em_print_hw_stats(sc);
#endif
#endif
	em_smartspeed(sc);

	timeout_add_sec(&sc->timer_handle, 1);

	splx(s);
}

void
em_update_link_status(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;

	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
		if (sc->link_active == 0) {
			em_get_speed_and_duplex(&sc->hw,
						&sc->link_speed,
						&sc->link_duplex);
			/* Check if we may set SPEED_MODE bit on PCI-E */
			if ((sc->link_speed == SPEED_1000) &&
			    ((sc->hw.mac_type == em_82571) ||
			    (sc->hw.mac_type == em_82572) ||
			    (sc->hw.mac_type == em_82575) ||
			    (sc->hw.mac_type == em_82580))) {
				int tarc0;

				tarc0 = E1000_READ_REG(&sc->hw, TARC0);
				tarc0 |= SPEED_MODE_BIT;
				E1000_WRITE_REG(&sc->hw, TARC0, tarc0);
			}
			sc->link_active = 1;
			sc->smartspeed = 0;
			ifp->if_baudrate = sc->link_speed * 1000000;
		}
		if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
			if (sc->link_duplex == FULL_DUPLEX)
				ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			else
				ifp->if_link_state = LINK_STATE_HALF_DUPLEX;
			if_link_state_change(ifp);
		}
	} else {
		if (sc->link_active == 1) {
			ifp->if_baudrate = sc->link_speed = 0;
			sc->link_duplex = 0;
			sc->link_active = 0;
		}
		if (ifp->if_link_state != LINK_STATE_DOWN) {
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

void
em_stop(void *arg, int softonly)
{
	struct em_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	INIT_DEBUGOUT("em_stop: begin");

	timeout_del(&sc->timer_handle);
	timeout_del(&sc->tx_fifo_timer_handle);

	if (!softonly) {
		em_disable_intr(sc);
		em_reset_hw(&sc->hw);
	}

	em_free_transmit_structures(sc);
	em_free_receive_structures(sc);
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
void
em_identify_hardware(struct em_softc *sc)
{
	u_int32_t reg;
	struct pci_attach_args *pa = &sc->osdep.em_pa;

	/* Make sure our PCI config space has the necessary stuff set */
	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
					    PCI_COMMAND_STATUS_REG);

	/* Save off the information about this board */
	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	sc->hw.revision_id = PCI_REVISION(reg);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
	sc->hw.subsystem_id = PCI_PRODUCT(reg);

	/* Identify the MAC */
	if (em_set_mac_type(&sc->hw))
		printf("%s: Unknown MAC Type\n", sc->sc_dv.dv_xname);

	if (sc->hw.mac_type == em_pchlan)
		sc->hw.revision_id = PCI_PRODUCT(pa->pa_id) & 0x0f;

	if (sc->hw.mac_type == em_82541 ||
	    sc->hw.mac_type == em_82541_rev_2 ||
	    sc->hw.mac_type == em_82547 ||
	    sc->hw.mac_type == em_82547_rev_2)
		sc->hw.phy_init_script = TRUE;
}

int
em_allocate_pci_resources(struct em_softc *sc)
{
	int		val, rid;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba is not mem space\n");
		return (ENXIO);
	}
	if (pci_mapreg_map(pa, EM_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.em_membase, &sc->osdep.em_memsize, 0)) {
		printf(": cannot find mem space\n");
		return (ENXIO);
	}

	switch (sc->hw.mac_type) {
	case em_82544:
	case em_82540:
	case em_82545:
	case em_82546:
	case em_82541:
	case em_82541_rev_2:
		/* Figure out where our I/O BAR is. */
		for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END;) {
			val = pci_conf_read(pa->pa_pc, pa->pa_tag, rid);
			if (PCI_MAPREG_TYPE(val) == PCI_MAPREG_TYPE_IO) {
				sc->io_rid = rid;
				break;
			}
			rid += 4;
			if (PCI_MAPREG_MEM_TYPE(val) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				rid += 4;	/* skip high bits, too */
		}

		if (pci_mapreg_map(pa, rid, PCI_MAPREG_TYPE_IO, 0,
		    &sc->osdep.io_bus_space_tag, &sc->osdep.io_bus_space_handle,
		    &sc->osdep.em_iobase, &sc->osdep.em_iosize, 0)) {
			printf(": cannot find i/o space\n");
			return (ENXIO);
		}

		sc->hw.io_base = 0;
		break;
	default:
		break;
	}

	/* for ICH8 and family we need to find the flash memory */
	if (sc->hw.mac_type == em_ich8lan ||
	    sc->hw.mac_type == em_ich9lan ||
	    sc->hw.mac_type == em_ich10lan ||
	    sc->hw.mac_type == em_pchlan ||
	    sc->hw.mac_type == em_pch2lan) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_FLASH);
		if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
			printf(": flash is not mem space\n");
			return (ENXIO);
		}

		if (pci_mapreg_map(pa, EM_FLASH, PCI_MAPREG_MEM_TYPE(val), 0,
		    &sc->osdep.flash_bus_space_tag, &sc->osdep.flash_bus_space_handle,
		    &sc->osdep.em_flashbase, &sc->osdep.em_flashsize, 0)) {
			printf(": cannot find mem space\n");
			return (ENXIO);
		}
	}

	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return (ENXIO);
	}

	sc->osdep.dev = (struct device *)sc;
	sc->hw.back = &sc->osdep;

	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, em_intr, sc,
					      sc->sc_dv.dv_xname);
	if (sc->sc_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return (ENXIO);
	}
	printf(": %s", intrstr);

	/*
	 * The ICP_xxxx device has multiple, duplicate register sets for
	 * use when it is being used as a network processor. Disable those
	 * registers here, as they are not necessary in this context and
	 * can confuse the system.
	 */
	if (sc->hw.mac_type == em_icp_xxxx) {
		int offset;
		pcireg_t val;

		if (!pci_get_capability(sc->osdep.em_pa.pa_pc,
		    sc->osdep.em_pa.pa_tag, PCI_CAP_ID_ST, &offset, &val)) {
			return (0);
		}
		offset += PCI_ST_SMIA_OFFSET;
		pci_conf_write(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
		    offset, 0x06);
		E1000_WRITE_REG(&sc->hw, IMC1, ~0x0);
		E1000_WRITE_REG(&sc->hw, IMC2, ~0x0);
	}
	return (0);
}

void
em_free_pci_resources(struct em_softc *sc)
{
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	if (sc->sc_intrhand)
		pci_intr_disestablish(pc, sc->sc_intrhand);
	sc->sc_intrhand = 0;

	if (sc->osdep.em_flashbase)
		bus_space_unmap(sc->osdep.flash_bus_space_tag, sc->osdep.flash_bus_space_handle,
				sc->osdep.em_flashsize);
	sc->osdep.em_flashbase = 0;

	if (sc->osdep.em_iobase)
		bus_space_unmap(sc->osdep.io_bus_space_tag, sc->osdep.io_bus_space_handle,
				sc->osdep.em_iosize);
	sc->osdep.em_iobase = 0;

	if (sc->osdep.em_membase)
		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
				sc->osdep.em_memsize);
	sc->osdep.em_membase = 0;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  em_softc structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
int
em_hardware_init(struct em_softc *sc)
{
	uint32_t ret_val;
	u_int16_t rx_buffer_size;

	INIT_DEBUGOUT("em_hardware_init: begin");
	/* Issue a global reset */
	em_reset_hw(&sc->hw);

	/* When hardware is reset, fifo_head is also reset */
	sc->tx_fifo_head = 0;

	/* Make sure we have a good EEPROM before we read from it */
	if (em_validate_eeprom_checksum(&sc->hw) < 0) {
		/*
1748 		 * Some PCIe parts fail the first check due to
1749 		 * the link being in a sleep state; check again and,
1750 		 * if it fails a second time, it's a real issue.
1751 		 */
1752 		if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1753 			printf("%s: The EEPROM Checksum Is Not Valid\n",
1754 			       sc->sc_dv.dv_xname);
1755 			return (EIO);
1756 		}
1757 	}
1758 
1759 	if (em_read_part_num(&sc->hw, &(sc->part_num)) < 0) {
1760 		printf("%s: EEPROM read error while reading part number\n",
1761 		       sc->sc_dv.dv_xname);
1762 		return (EIO);
1763 	}
1764 
1765 	/* Set up smart power down as default off on newer adapters */
1766 	if (!em_smart_pwr_down &&
1767 	     (sc->hw.mac_type == em_82571 ||
1768 	      sc->hw.mac_type == em_82572 ||
1769 	      sc->hw.mac_type == em_82575 ||
1770 	      sc->hw.mac_type == em_82580 ||
1771 	      sc->hw.mac_type == em_i350)) {
1772 		uint16_t phy_tmp = 0;
1773 
1774 		/* Speed up time to link by disabling smart power down */
1775 		em_read_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1776 		phy_tmp &= ~IGP02E1000_PM_SPD;
1777 		em_write_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1778 	}
1779 
1780 	/*
1781 	 * These parameters control the automatic generation (Tx) and
1782 	 * response (Rx) to Ethernet PAUSE frames.
1783 	 * - High water mark should allow for at least two frames to be
1784 	 *   received after sending an XOFF.
1785 	 * - Low water mark works best when it is very near the high water mark.
1786 	 *   This allows the receiver to restart by sending XON when it has
1787 	 *   drained a bit.  Here we use an arbitrary value of 1500 which will
1788 	 *   restart after one full frame is pulled from the buffer.  There
1789 	 *   could be several smaller frames in the buffer and if so they will
1790 	 *   not trigger the XON until their total number reduces the buffer
1791 	 *   by 1500.
1792 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1793 	 */
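	/*
	 * The low 16 bits of PBA report the receive packet buffer
	 * allocation in kilobytes; shift left by 10 to get bytes.
	 */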
1794 	rx_buffer_size = ((E1000_READ_REG(&sc->hw, PBA) & 0xffff) << 10);
1795 
1796 	sc->hw.fc_high_water = rx_buffer_size -
1797 	    EM_ROUNDUP(sc->hw.max_frame_size, 1024);
1798 	sc->hw.fc_low_water = sc->hw.fc_high_water - 1500;
1799 	if (sc->hw.mac_type == em_80003es2lan)
1800 		sc->hw.fc_pause_time = 0xFFFF;
1801 	else
1802 		sc->hw.fc_pause_time = 1000;
1803 	sc->hw.fc_send_xon = TRUE;
1804 	sc->hw.fc = E1000_FC_FULL;
1805 
1806 	if ((ret_val = em_init_hw(&sc->hw)) != 0) {
1807 		if (ret_val == E1000_DEFER_INIT) {
1808 			INIT_DEBUGOUT("\nHardware Initialization Deferred ");
1809 			return (EAGAIN);
1810 		}
1811 		printf("%s: Hardware Initialization Failed\n",
1812 		       sc->sc_dv.dv_xname);
1813 		return (EIO);
1814 	}
1815 
1816 	em_check_for_link(&sc->hw);
1817 
1818 	return (0);
1819 }
1820 
1821 /*********************************************************************
1822  *
1823  *  Setup networking device structure and register an interface.
1824  *
1825  **********************************************************************/
1826 void
1827 em_setup_interface(struct em_softc *sc)
1828 {
1829 	struct ifnet   *ifp;
1830 	u_char fiber_type = IFM_1000_SX;
1831 
1832 	INIT_DEBUGOUT("em_setup_interface: begin");
1833 
1834 	ifp = &sc->interface_data.ac_if;
1835 	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
1836 	ifp->if_softc = sc;
1837 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1838 	ifp->if_ioctl = em_ioctl;
1839 	ifp->if_start = em_start;
1840 	ifp->if_watchdog = em_watchdog;
1841 	ifp->if_hardmtu =
1842 		sc->hw.max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN;
1843 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1844 	IFQ_SET_READY(&ifp->if_snd);
1845 
1846 	m_clsetwms(ifp, MCLBYTES, 4, sc->num_rx_desc);
1847 
1848 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1849 
1850 #if NVLAN > 0
1851 	if (sc->hw.mac_type != em_82575 && sc->hw.mac_type != em_82580 &&
1852 	    sc->hw.mac_type != em_i350)
1853 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1854 #endif
1855 
1856 #ifdef EM_CSUM_OFFLOAD
1857 	if (sc->hw.mac_type >= em_82543)
1858 		ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
1859 #endif
1860 
1861 	/*
1862 	 * Specify the media types supported by this adapter and register
1863 	 * callbacks to update media and link information
1864 	 */
1865 	ifmedia_init(&sc->media, IFM_IMASK, em_media_change,
1866 		     em_media_status);
1867 	if (sc->hw.media_type == em_media_type_fiber ||
1868 	    sc->hw.media_type == em_media_type_internal_serdes) {
1869 		if (sc->hw.mac_type == em_82545)
1870 			fiber_type = IFM_1000_LX;
1871 		ifmedia_add(&sc->media, IFM_ETHER | fiber_type | IFM_FDX,
1872 			    0, NULL);
1873 		ifmedia_add(&sc->media, IFM_ETHER | fiber_type,
1874 			    0, NULL);
1875 	} else {
1876 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1877 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1878 			    0, NULL);
1879 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX,
1880 			    0, NULL);
1881 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1882 			    0, NULL);
1883 		if (sc->hw.phy_type != em_phy_ife) {
1884 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1885 				    0, NULL);
1886 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1887 		}
1888 	}
1889 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1890 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1891 
1892 	if_attach(ifp);
1893 	ether_ifattach(ifp);
1894 }
1895 
1896 int
1897 em_detach(struct device *self, int flags)
1898 {
1899 	struct em_softc *sc = (struct em_softc *)self;
1900 	struct ifnet *ifp = &sc->interface_data.ac_if;
1901 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1902 	pci_chipset_tag_t	pc = pa->pa_pc;
1903 
1904 	if (sc->sc_intrhand)
1905 		pci_intr_disestablish(pc, sc->sc_intrhand);
1906 	sc->sc_intrhand = 0;
1907 
1908 	em_stop(sc, 1);
1909 
1910 	em_free_pci_resources(sc);
1911 	em_dma_free(sc, &sc->rxdma);
1912 	em_dma_free(sc, &sc->txdma);
1913 
1914 	ether_ifdetach(ifp);
1915 	if_detach(ifp);
1916 
1917 	return (0);
1918 }
1919 
1920 int
1921 em_activate(struct device *self, int act)
1922 {
1923 	struct em_softc *sc = (struct em_softc *)self;
1924 	struct ifnet *ifp = &sc->interface_data.ac_if;
1925 	int rv = 0;
1926 
1927 	switch (act) {
1928 	case DVACT_QUIESCE:
1929 		rv = config_activate_children(self, act);
1930 		break;
1931 	case DVACT_SUSPEND:
1932 		if (ifp->if_flags & IFF_RUNNING)
1933 			em_stop(sc, 0);
1934 		/* We have no children at the moment, but we will soon */
1935 		rv = config_activate_children(self, act);
1936 		break;
1937 	case DVACT_RESUME:
1938 		rv = config_activate_children(self, act);
1939 		if (ifp->if_flags & IFF_UP)
1940 			em_init(sc);
1941 		break;
1942 	}
1943 	return (rv);
1944 }
1945 
1946 /*********************************************************************
1947  *
1948  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1949  *
1950  **********************************************************************/
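/*
 * Runs only while the link is down on an IGP copper PHY with
 * 1000baseT-FDX advertised.  After repeated master/slave configuration
 * faults, first clear the manual master/slave setting and restart
 * autonegotiation; if the link is still down after
 * EM_SMARTSPEED_DOWNSHIFT attempts, force manual configuration
 * instead.  The whole cycle restarts after EM_SMARTSPEED_MAX
 * iterations.
 */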
1951 void
1952 em_smartspeed(struct em_softc *sc)
1953 {
1954 	uint16_t phy_tmp;
1955 
1956 	if (sc->link_active || (sc->hw.phy_type != em_phy_igp) ||
1957 	    !sc->hw.autoneg || !(sc->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1958 		return;
1959 
1960 	if (sc->smartspeed == 0) {
1961 		/* If the Master/Slave config fault is asserted twice,
1962 		 * we assume the faults are back-to-back. */
1963 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1964 		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1965 			return;
1966 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1967 		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1968 			em_read_phy_reg(&sc->hw, PHY_1000T_CTRL,
1969 					&phy_tmp);
1970 			if (phy_tmp & CR_1000T_MS_ENABLE) {
1971 				phy_tmp &= ~CR_1000T_MS_ENABLE;
1972 				em_write_phy_reg(&sc->hw,
1973 						    PHY_1000T_CTRL, phy_tmp);
1974 				sc->smartspeed++;
1975 				if (sc->hw.autoneg &&
1976 				    !em_phy_setup_autoneg(&sc->hw) &&
1977 				    !em_read_phy_reg(&sc->hw, PHY_CTRL,
1978 						       &phy_tmp)) {
1979 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
1980 						    MII_CR_RESTART_AUTO_NEG);
1981 					em_write_phy_reg(&sc->hw,
1982 							 PHY_CTRL, phy_tmp);
1983 				}
1984 			}
1985 		}
1986 		return;
1987 	} else if (sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1988 		/* If still no link, perhaps using 2/3 pair cable */
1989 		em_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
1990 		phy_tmp |= CR_1000T_MS_ENABLE;
1991 		em_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
1992 		if (sc->hw.autoneg &&
1993 		    !em_phy_setup_autoneg(&sc->hw) &&
1994 		    !em_read_phy_reg(&sc->hw, PHY_CTRL, &phy_tmp)) {
1995 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
1996 				    MII_CR_RESTART_AUTO_NEG);
1997 			em_write_phy_reg(&sc->hw, PHY_CTRL, phy_tmp);
1998 		}
1999 	}
2000 	/* Restart process after EM_SMARTSPEED_MAX iterations */
2001 	if (sc->smartspeed++ == EM_SMARTSPEED_MAX)
2002 		sc->smartspeed = 0;
2003 }
2004 
2005 /*
2006  * Manage DMA'able memory.
2007  */
2008 int
2009 em_dma_malloc(struct em_softc *sc, bus_size_t size,
2010     struct em_dma_alloc *dma, int mapflags)
2011 {
2012 	int r;
2013 
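	/*
	 * Standard four-step bus_dma(9) sequence: create a map, allocate
	 * DMA-safe memory, map it into kernel virtual address space and
	 * load it into the map so the device-visible address is known.
	 */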
2014 	dma->dma_tag = sc->osdep.em_pa.pa_dmat;
2015 	r = bus_dmamap_create(dma->dma_tag, size, 1,
2016 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
2017 	if (r != 0) {
2018 		printf("%s: em_dma_malloc: bus_dmamap_create failed; "
2019 			"error %u\n", sc->sc_dv.dv_xname, r);
2020 		goto fail_0;
2021 	}
2022 
2023 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
2024 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
2025 	if (r != 0) {
2026 		printf("%s: em_dma_malloc: bus_dmamem_alloc failed; "
2027 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
2028 			(unsigned long)size, r);
2029 		goto fail_1;
2030 	}
2031 
2032 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
2033 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
2034 	if (r != 0) {
2035 		printf("%s: em_dma_malloc: bus_dmamem_map failed; "
2036 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
2037 			(unsigned long)size, r);
2038 		goto fail_2;
2039 	}
2040 
2041 	r = bus_dmamap_load(sc->osdep.em_pa.pa_dmat, dma->dma_map,
2042 			    dma->dma_vaddr, size, NULL,
2043 			    mapflags | BUS_DMA_NOWAIT);
2044 	if (r != 0) {
2045 		printf("%s: em_dma_malloc: bus_dmamap_load failed; "
2046 			"error %u\n", sc->sc_dv.dv_xname, r);
2047 		goto fail_3;
2048 	}
2049 
2050 	dma->dma_size = size;
2051 	return (0);
2052 
2053 fail_3:
2054 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
2055 fail_2:
2056 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2057 fail_1:
2058 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2059 fail_0:
2060 	dma->dma_map = NULL;
2061 	dma->dma_tag = NULL;
2062 
2063 	return (r);
2064 }
2065 
2066 void
2067 em_dma_free(struct em_softc *sc, struct em_dma_alloc *dma)
2068 {
2069 	if (dma->dma_tag == NULL)
2070 		return;
2071 
2072 	if (dma->dma_map != NULL) {
2073 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
2074 		    dma->dma_map->dm_mapsize,
2075 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2076 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2077 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
2078 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2079 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2080 	}
2081 	dma->dma_tag = NULL;
2082 }
2083 
2084 /*********************************************************************
2085  *
2086  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2087  *  the information needed to transmit a packet on the wire.
2088  *
2089  **********************************************************************/
2090 int
2091 em_allocate_transmit_structures(struct em_softc *sc)
2092 {
2093 	if (!(sc->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2094 	    sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2095 		printf("%s: Unable to allocate tx_buffer memory\n",
2096 		       sc->sc_dv.dv_xname);
2097 		return (ENOMEM);
2098 	}
2099 
2100 	return (0);
2101 }
2102 
2103 /*********************************************************************
2104  *
2105  *  Allocate and initialize transmit structures.
2106  *
2107  **********************************************************************/
2108 int
2109 em_setup_transmit_structures(struct em_softc *sc)
2110 {
2111 	struct  em_buffer *tx_buffer;
2112 	int error, i;
2113 
2114 	if ((error = em_allocate_transmit_structures(sc)) != 0)
2115 		goto fail;
2116 
2117 	bzero((void *) sc->tx_desc_base,
2118 	      (sizeof(struct em_tx_desc)) * sc->num_tx_desc);
2119 
2120 	sc->txtag = sc->osdep.em_pa.pa_dmat;
2121 
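	/*
	 * Create one DMA map per descriptor slot; each map can take a
	 * frame of up to MAX_JUMBO_FRAME_SIZE bytes split across at most
	 * EM_MAX_SCATTER segments.
	 */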
2122 	tx_buffer = sc->tx_buffer_area;
2123 	for (i = 0; i < sc->num_tx_desc; i++) {
2124 		error = bus_dmamap_create(sc->txtag, MAX_JUMBO_FRAME_SIZE,
2125 			    EM_MAX_SCATTER, MAX_JUMBO_FRAME_SIZE, 0,
2126 			    BUS_DMA_NOWAIT, &tx_buffer->map);
2127 		if (error != 0) {
2128 			printf("%s: Unable to create TX DMA map\n",
2129 			    sc->sc_dv.dv_xname);
2130 			goto fail;
2131 		}
2132 		tx_buffer++;
2133 	}
2134 
2135 	sc->next_avail_tx_desc = 0;
2136 	sc->next_tx_to_clean = 0;
2137 
2138 	/* Set number of descriptors available */
2139 	sc->num_tx_desc_avail = sc->num_tx_desc;
2140 
2141 	/* Set checksum context */
2142 	sc->active_checksum_context = OFFLOAD_NONE;
2143 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2144 	    sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2145 
2146 	return (0);
2147 
2148 fail:
2149 	em_free_transmit_structures(sc);
2150 	return (error);
2151 }
2152 
2153 /*********************************************************************
2154  *
2155  *  Enable transmit unit.
2156  *
2157  **********************************************************************/
2158 void
2159 em_initialize_transmit_unit(struct em_softc *sc)
2160 {
2161 	u_int32_t	reg_tctl, reg_tipg = 0;
2162 	u_int64_t	bus_addr;
2163 
2164 	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2165 
2166 	/* Setup the Base and Length of the Tx Descriptor Ring */
2167 	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
2168 	E1000_WRITE_REG(&sc->hw, TDLEN,
2169 			sc->num_tx_desc *
2170 			sizeof(struct em_tx_desc));
2171 	E1000_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
2172 	E1000_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
2173 
2174 	/* Setup the HW Tx Head and Tail descriptor pointers */
2175 	E1000_WRITE_REG(&sc->hw, TDT, 0);
2176 	E1000_WRITE_REG(&sc->hw, TDH, 0);
2177 
2178 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2179 		     E1000_READ_REG(&sc->hw, TDBAL),
2180 		     E1000_READ_REG(&sc->hw, TDLEN));
2181 
2182 	/* Set the default values for the Tx Inter Packet Gap timer */
2183 	switch (sc->hw.mac_type) {
2184 	case em_82542_rev2_0:
2185 	case em_82542_rev2_1:
2186 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
2187 		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2188 		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2189 		break;
2190 	case em_80003es2lan:
2191 		reg_tipg = DEFAULT_82543_TIPG_IPGR1;
2192 		reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2193 		break;
2194 	default:
2195 		if (sc->hw.media_type == em_media_type_fiber ||
2196 		    sc->hw.media_type == em_media_type_internal_serdes)
2197 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2198 		else
2199 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2200 		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2201 		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2202 	}
2203 
2205 	E1000_WRITE_REG(&sc->hw, TIPG, reg_tipg);
2206 	E1000_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
2207 	if (sc->hw.mac_type >= em_82540)
2208 		E1000_WRITE_REG(&sc->hw, TADV, sc->tx_abs_int_delay);
2209 
2210 	/* Setup Transmit Descriptor Base Settings */
2211 	sc->txd_cmd = E1000_TXD_CMD_IFCS;
2212 
2213 	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2214 	    sc->hw.mac_type == em_i350) {
2215 		/* 82575/6 need to enable the TX queue and lack the IDE bit */
2216 		reg_tctl = E1000_READ_REG(&sc->hw, TXDCTL);
2217 		reg_tctl |= E1000_TXDCTL_QUEUE_ENABLE;
2218 		E1000_WRITE_REG(&sc->hw, TXDCTL, reg_tctl);
2219 	} else if (sc->tx_int_delay > 0)
2220 		sc->txd_cmd |= E1000_TXD_CMD_IDE;
2221 
2222 	/* Program the Transmit Control Register */
2223 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2224 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2225 	if (sc->hw.mac_type >= em_82571)
2226 		reg_tctl |= E1000_TCTL_MULR;
2227 	if (sc->link_duplex == FULL_DUPLEX)
2228 		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2229 	else
2230 		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2231 	/* This write will effectively turn on the transmit unit */
2232 	E1000_WRITE_REG(&sc->hw, TCTL, reg_tctl);
2233 }
2234 
2235 /*********************************************************************
2236  *
2237  *  Free all transmit related data structures.
2238  *
2239  **********************************************************************/
2240 void
2241 em_free_transmit_structures(struct em_softc *sc)
2242 {
2243 	struct em_buffer   *tx_buffer;
2244 	int		i;
2245 
2246 	INIT_DEBUGOUT("free_transmit_structures: begin");
2247 
2248 	if (sc->tx_buffer_area != NULL) {
2249 		tx_buffer = sc->tx_buffer_area;
2250 		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2251 			if (tx_buffer->map != NULL &&
2252 			    tx_buffer->map->dm_nsegs > 0) {
2253 				bus_dmamap_sync(sc->txtag, tx_buffer->map,
2254 				    0, tx_buffer->map->dm_mapsize,
2255 				    BUS_DMASYNC_POSTWRITE);
2256 				bus_dmamap_unload(sc->txtag,
2257 				    tx_buffer->map);
2258 			}
2259 			if (tx_buffer->m_head != NULL) {
2260 				m_freem(tx_buffer->m_head);
2261 				tx_buffer->m_head = NULL;
2262 			}
2263 			if (tx_buffer->map != NULL) {
2264 				bus_dmamap_destroy(sc->txtag,
2265 				    tx_buffer->map);
2266 				tx_buffer->map = NULL;
2267 			}
2268 		}
2269 	}
2270 	if (sc->tx_buffer_area != NULL) {
2271 		free(sc->tx_buffer_area, M_DEVBUF);
2272 		sc->tx_buffer_area = NULL;
2273 	}
2274 	sc->txtag = NULL;
2276 }
2277 
2278 #ifdef EM_CSUM_OFFLOAD
2279 /*********************************************************************
2280  *
2281  *  The offload context needs to be set when we transfer the first
2282  *  packet of a particular protocol (TCP/UDP). We change the
2283  *  context only if the protocol type changes.
2284  *
2285  **********************************************************************/
2286 void
2287 em_transmit_checksum_setup(struct em_softc *sc, struct mbuf *mp,
2288     u_int32_t *txd_upper, u_int32_t *txd_lower)
2289 {
2290 	struct em_context_desc *TXD;
2291 	struct em_buffer *tx_buffer;
2292 	int curr_txd;
2293 
2294 	if (mp->m_pkthdr.csum_flags) {
2295 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
2296 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2297 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2298 			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
2299 				return;
2300 			else
2301 				sc->active_checksum_context = OFFLOAD_TCP_IP;
2302 		} else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
2303 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2304 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2305 			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
2306 				return;
2307 			else
2308 				sc->active_checksum_context = OFFLOAD_UDP_IP;
2309 		} else {
2310 			*txd_upper = 0;
2311 			*txd_lower = 0;
2312 			return;
2313 		}
2314 	} else {
2315 		*txd_upper = 0;
2316 		*txd_lower = 0;
2317 		return;
2318 	}
2319 
2320 	/* If we reach this point, the checksum offload context
2321 	 * needs to be reset.
2322 	 */
2323 	curr_txd = sc->next_avail_tx_desc;
2324 	tx_buffer = &sc->tx_buffer_area[curr_txd];
2325 	TXD = (struct em_context_desc *) &sc->tx_desc_base[curr_txd];
2326 
2327 	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2328 	TXD->lower_setup.ip_fields.ipcso =
2329 	    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2330 	TXD->lower_setup.ip_fields.ipcse =
2331 	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2332 
2333 	TXD->upper_setup.tcp_fields.tucss =
2334 	    ETHER_HDR_LEN + sizeof(struct ip);
2335 	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2336 
2337 	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
2338 		TXD->upper_setup.tcp_fields.tucso =
2339 		    ETHER_HDR_LEN + sizeof(struct ip) +
2340 		    offsetof(struct tcphdr, th_sum);
2341 	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
2342 		TXD->upper_setup.tcp_fields.tucso =
2343 		    ETHER_HDR_LEN + sizeof(struct ip) +
2344 		    offsetof(struct udphdr, uh_sum);
2345 	}
2346 
2347 	TXD->tcp_seg_setup.data = htole32(0);
2348 	TXD->cmd_and_length = htole32(sc->txd_cmd | E1000_TXD_CMD_DEXT);
2349 
2350 	tx_buffer->m_head = NULL;
2351 	tx_buffer->next_eop = -1;
2352 
2353 	if (++curr_txd == sc->num_tx_desc)
2354 		curr_txd = 0;
2355 
2356 	sc->num_tx_desc_avail--;
2357 	sc->next_avail_tx_desc = curr_txd;
2358 }
2359 #endif /* EM_CSUM_OFFLOAD */
2360 
2361 /**********************************************************************
2362  *
2363  *  Examine each tx_buffer in the used queue. If the hardware is done
2364  *  processing the packet then free associated resources. The
2365  *  tx_buffer is put back on the free queue.
2366  *
2367  **********************************************************************/
2368 void
2369 em_txeof(struct em_softc *sc)
2370 {
2371 	int first, last, done, num_avail;
2372 	struct em_buffer *tx_buffer;
2373 	struct em_tx_desc   *tx_desc, *eop_desc;
2374 	struct ifnet   *ifp = &sc->interface_data.ac_if;
2375 
2376 	if (sc->num_tx_desc_avail == sc->num_tx_desc)
2377 		return;
2378 
2379 	num_avail = sc->num_tx_desc_avail;
2380 	first = sc->next_tx_to_clean;
2381 	tx_desc = &sc->tx_desc_base[first];
2382 	tx_buffer = &sc->tx_buffer_area[first];
2383 	last = tx_buffer->next_eop;
2384 	eop_desc = &sc->tx_desc_base[last];
2385 
2386 	/*
2387 	 * What this does is get the index of the
2388 	 * first descriptor AFTER the EOP of the
2389 	 * first packet; that way we can do the
2390 	 * simple comparison in the inner while loop.
2391 	 */
2392 	if (++last == sc->num_tx_desc)
2393 		last = 0;
2394 	done = last;
2395 
2396 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2397 	    sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2398 	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2399 		/* We clean the range of the packet */
2400 		while (first != done) {
2401 			tx_desc->upper.data = 0;
2402 			tx_desc->lower.data = 0;
2403 			num_avail++;
2404 
2405 			if (tx_buffer->m_head != NULL) {
2406 				ifp->if_opackets++;
2407 				if (tx_buffer->map->dm_nsegs > 0) {
2408 					bus_dmamap_sync(sc->txtag,
2409 					    tx_buffer->map, 0,
2410 					    tx_buffer->map->dm_mapsize,
2411 					    BUS_DMASYNC_POSTWRITE);
2412 					bus_dmamap_unload(sc->txtag,
2413 					    tx_buffer->map);
2414 				}
2415 				m_freem(tx_buffer->m_head);
2416 				tx_buffer->m_head = NULL;
2417 			}
2418 			tx_buffer->next_eop = -1;
2419 
2420 			if (++first == sc->num_tx_desc)
2421 				first = 0;
2422 
2423 			tx_buffer = &sc->tx_buffer_area[first];
2424 			tx_desc = &sc->tx_desc_base[first];
2425 		}
2426 		/* See if we can continue to the next packet */
2427 		last = tx_buffer->next_eop;
2428 		if (last != -1) {
2429 			eop_desc = &sc->tx_desc_base[last];
2430 			/* Get new done point */
2431 			if (++last == sc->num_tx_desc)
2432 				last = 0;
2433 			done = last;
2434 		} else
2435 			break;
2436 	}
2437 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2438 	    sc->txdma.dma_map->dm_mapsize,
2439 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2440 
2441 	sc->next_tx_to_clean = first;
2442 
2443 	/*
2444 	 * If we have enough room, clear IFF_OACTIVE to tell the stack
2445 	 * that it is OK to send packets.
2446 	 * If there are no pending descriptors, clear the timeout. Otherwise,
2447 	 * if some descriptors have been freed, restart the timeout.
2448 	 */
2449 	if (num_avail > EM_TX_CLEANUP_THRESHOLD)
2450 		ifp->if_flags &= ~IFF_OACTIVE;
2451 
2452 	/* All clean, turn off the timer */
2453 	if (num_avail == sc->num_tx_desc)
2454 		ifp->if_timer = 0;
2455 	/* Some cleaned, reset the timer */
2456 	else if (num_avail != sc->num_tx_desc_avail)
2457 		ifp->if_timer = EM_TX_TIMEOUT;
2458 
2459 	sc->num_tx_desc_avail = num_avail;
2460 }
2461 
2462 /*********************************************************************
2463  *
2464  *  Get a buffer from system mbuf buffer pool.
2465  *
2466  **********************************************************************/
2467 int
2468 em_get_buf(struct em_softc *sc, int i)
2469 {
2470 	struct mbuf    *m;
2471 	struct em_buffer *pkt;
2472 	struct em_rx_desc *desc;
2473 	int error;
2474 
2475 	pkt = &sc->rx_buffer_area[i];
2476 	desc = &sc->rx_desc_base[i];
2477 
2478 	if (pkt->m_head != NULL) {
2479 		printf("%s: em_get_buf: slot %d already has an mbuf\n",
2480 		    sc->sc_dv.dv_xname, i);
2481 		return (ENOBUFS);
2482 	}
2483 
2484 	m = MCLGETI(NULL, M_DONTWAIT, &sc->interface_data.ac_if, MCLBYTES);
2485 	if (!m) {
2486 		sc->mbuf_cluster_failed++;
2487 		return (ENOBUFS);
2488 	}
2489 	m->m_len = m->m_pkthdr.len = MCLBYTES;
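	/*
	 * Shift the payload by ETHER_ALIGN (2) bytes so the IP header
	 * that follows the 14-byte Ethernet header ends up 32-bit
	 * aligned, but only if the largest possible frame would still
	 * fit in the cluster afterwards.
	 */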
2490 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2491 		m_adj(m, ETHER_ALIGN);
2492 
2493 	error = bus_dmamap_load_mbuf(sc->rxtag, pkt->map, m, BUS_DMA_NOWAIT);
2494 	if (error) {
2495 		m_freem(m);
2496 		return (error);
2497 	}
2498 
2499 	bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
2500 	    BUS_DMASYNC_PREREAD);
2501 	pkt->m_head = m;
2502 
2503 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2504 	    sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_POSTWRITE);
2505 
2506 	bzero(desc, sizeof(*desc));
2507 	desc->buffer_addr = htole64(pkt->map->dm_segs[0].ds_addr);
2508 
2509 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2510 	    sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_PREWRITE);
2511 
2512 	sc->rx_ndescs++;
2513 
2514 	return (0);
2515 }
2516 
2517 /*********************************************************************
2518  *
2519  *  Allocate memory for rx_buffer structures. Since we use one
2520  *  rx_buffer per received packet, the maximum number of rx_buffers
2521  *  that we'll need is equal to the number of receive descriptors
2522  *  that we've allocated.
2523  *
2524  **********************************************************************/
2525 int
2526 em_allocate_receive_structures(struct em_softc *sc)
2527 {
2528 	int		i, error;
2529 	struct em_buffer *rx_buffer;
2530 
2531 	if (!(sc->rx_buffer_area = malloc(sizeof(struct em_buffer) *
2532 	    sc->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2533 		printf("%s: Unable to allocate rx_buffer memory\n",
2534 		       sc->sc_dv.dv_xname);
2535 		return (ENOMEM);
2536 	}
2537 
2538 	sc->rxtag = sc->osdep.em_pa.pa_dmat;
2539 
2540 	rx_buffer = sc->rx_buffer_area;
2541 	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2542 		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
2543 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rx_buffer->map);
2544 		if (error != 0) {
2545 			printf("%s: em_allocate_receive_structures: "
2546 			    "bus_dmamap_create failed; error %u\n",
2547 			    sc->sc_dv.dv_xname, error);
2548 			goto fail;
2549 		}
2550 		rx_buffer->m_head = NULL;
2551 	}
2552 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
2553 	    sc->rxdma.dma_map->dm_mapsize,
2554 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2555 
2556 	return (0);
2557 
2558 fail:
2559 	em_free_receive_structures(sc);
2560 	return (error);
2561 }
2562 
2563 /*********************************************************************
2564  *
2565  *  Allocate and initialize receive structures.
2566  *
2567  **********************************************************************/
2568 int
2569 em_setup_receive_structures(struct em_softc *sc)
2570 {
2571 	bzero((void *) sc->rx_desc_base,
2572 	    (sizeof(struct em_rx_desc)) * sc->num_rx_desc);
2573 
2574 	if (em_allocate_receive_structures(sc))
2575 		return (ENOMEM);
2576 
2577 	/* Setup our descriptor pointers */
2578 	sc->next_rx_desc_to_check = 0;
2579 	sc->last_rx_desc_filled = sc->num_rx_desc - 1;
2580 	sc->rx_ndescs = 0;
2581 
2582 	em_rxfill(sc);
2583 	if (sc->rx_ndescs < 1) {
2584 		printf("%s: unable to fill any rx descriptors\n",
2585 		    sc->sc_dv.dv_xname);
2586 	}
2587 
2588 	return (0);
2589 }
2590 
2591 /*********************************************************************
2592  *
2593  *  Enable receive unit.
2594  *
2595  **********************************************************************/
2596 void
2597 em_initialize_receive_unit(struct em_softc *sc)
2598 {
2599 	u_int32_t	reg_rctl;
2600 	u_int32_t	reg_rxcsum;
2601 	u_int64_t	bus_addr;
2602 
2603 	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2604 
2605 	/* Make sure receives are disabled while setting up the descriptor ring */
2606 	E1000_WRITE_REG(&sc->hw, RCTL, 0);
2607 
2608 	/* Set the Receive Delay Timer Register */
2609 	E1000_WRITE_REG(&sc->hw, RDTR,
2610 			sc->rx_int_delay | E1000_RDT_FPDB);
2611 
2612 	if (sc->hw.mac_type >= em_82540) {
2613 		if (sc->rx_int_delay)
2614 			E1000_WRITE_REG(&sc->hw, RADV, sc->rx_abs_int_delay);
2615 
2616 		/* Set the interrupt throttling rate.  Value is calculated
2617 		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2618 		E1000_WRITE_REG(&sc->hw, ITR, DEFAULT_ITR);
2619 	}
2620 
2621 	/* Setup the Base and Length of the Rx Descriptor Ring */
2622 	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
2623 	E1000_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
2624 			sizeof(struct em_rx_desc));
2625 	E1000_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2626 	E1000_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
2627 
2628 	/* Setup the Receive Control Register */
2629 	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2630 	    E1000_RCTL_RDMTS_HALF |
2631 	    (sc->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2632 
2633 	if (sc->hw.tbi_compatibility_on == TRUE)
2634 		reg_rctl |= E1000_RCTL_SBP;
2635 
2636 	/*
2637 	 * The i350 has a bug where it always strips the CRC whether
2638 	 * asked to or not.  So ask for stripped CRC here and
2639 	 * cope in rxeof.
2640 	 */
2641 	if (sc->hw.mac_type == em_i350)
2642 		reg_rctl |= E1000_RCTL_SECRC;
2643 
2644 	switch (sc->rx_buffer_len) {
2645 	default:
2646 	case EM_RXBUFFER_2048:
2647 		reg_rctl |= E1000_RCTL_SZ_2048;
2648 		break;
2649 	case EM_RXBUFFER_4096:
2650 		reg_rctl |= E1000_RCTL_SZ_4096|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2651 		break;
2652 	case EM_RXBUFFER_8192:
2653 		reg_rctl |= E1000_RCTL_SZ_8192|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2654 		break;
2655 	case EM_RXBUFFER_16384:
2656 		reg_rctl |= E1000_RCTL_SZ_16384|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2657 		break;
2658 	}
2659 
2660 	if (sc->hw.max_frame_size != ETHER_MAX_LEN)
2661 		reg_rctl |= E1000_RCTL_LPE;
2662 
2663 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2664 	if (sc->hw.mac_type >= em_82543) {
2665 		reg_rxcsum = E1000_READ_REG(&sc->hw, RXCSUM);
2666 		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2667 		E1000_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
2668 	}
2669 
2670 	/*
2671 	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
2672 	 * long latencies are observed, like Lenovo X60.
2673 	 */
2674 	if (sc->hw.mac_type == em_82573)
2675 		E1000_WRITE_REG(&sc->hw, RDTR, 0x20);
2676 
2677 	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2678 	    sc->hw.mac_type == em_i350) {
2679 		/* 82575/6 need to enable the RX queue */
2680 		uint32_t reg;
2681 		reg = E1000_READ_REG(&sc->hw, RXDCTL);
2682 		reg |= E1000_RXDCTL_QUEUE_ENABLE;
2683 		E1000_WRITE_REG(&sc->hw, RXDCTL, reg);
2684 	}
2685 
2686 	/* Enable Receives */
2687 	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
2688 
2689 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2690 	E1000_WRITE_REG(&sc->hw, RDH, 0);
2691 	E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
2692 }
2693 
2694 /*********************************************************************
2695  *
2696  *  Free receive related data structures.
2697  *
2698  **********************************************************************/
2699 void
2700 em_free_receive_structures(struct em_softc *sc)
2701 {
2702 	struct em_buffer   *rx_buffer;
2703 	int		i;
2704 
2705 	INIT_DEBUGOUT("free_receive_structures: begin");
2706 
2707 	if (sc->rx_buffer_area != NULL) {
2708 		rx_buffer = sc->rx_buffer_area;
2709 		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2710 			if (rx_buffer->m_head != NULL) {
2711 				bus_dmamap_sync(sc->rxtag, rx_buffer->map,
2712 				    0, rx_buffer->map->dm_mapsize,
2713 				    BUS_DMASYNC_POSTREAD);
2714 				bus_dmamap_unload(sc->rxtag, rx_buffer->map);
2715 				m_freem(rx_buffer->m_head);
2716 				rx_buffer->m_head = NULL;
2717 			}
2718 			bus_dmamap_destroy(sc->rxtag, rx_buffer->map);
2719 		}
2720 	}
2721 	if (sc->rx_buffer_area != NULL) {
2722 		free(sc->rx_buffer_area, M_DEVBUF);
2723 		sc->rx_buffer_area = NULL;
2724 	}
2725 	sc->rxtag = NULL;
2727 
2728 	if (sc->fmp != NULL) {
2729 		m_freem(sc->fmp);
2730 		sc->fmp = NULL;
2731 		sc->lmp = NULL;
2732 	}
2733 }
2734 
2735 #ifdef __STRICT_ALIGNMENT
2736 void
2737 em_realign(struct em_softc *sc, struct mbuf *m, u_int16_t *prev_len_adj)
2738 {
2739 	unsigned char tmp_align_buf[ETHER_ALIGN];
2740 	int tmp_align_buf_len = 0;
2741 
2742 	/*
2743 	 * The Ethernet payload is not 32-bit aligned when
2744 	 * Jumbo packets are enabled, so on architectures with
2745 	 * strict alignment we need to shift the entire packet
2746 	 * ETHER_ALIGN bytes. Ugh.
2747 	 */
2748 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2749 		return;
2750 
2751 	if (*prev_len_adj > sc->align_buf_len)
2752 		*prev_len_adj -= sc->align_buf_len;
2753 	else
2754 		*prev_len_adj = 0;
2755 
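	/*
	 * A frame spanning several clusters is shifted one cluster at a
	 * time: save the ETHER_ALIGN bytes that would be pushed past the
	 * end of this cluster, move the data up, then prepend the bytes
	 * carried over from the previous cluster.
	 */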
2756 	if (m->m_len > (MCLBYTES - ETHER_ALIGN)) {
2757 		bcopy(m->m_data + (MCLBYTES - ETHER_ALIGN),
2758 		    &tmp_align_buf, ETHER_ALIGN);
2759 		tmp_align_buf_len = m->m_len -
2760 		    (MCLBYTES - ETHER_ALIGN);
2761 		m->m_len -= ETHER_ALIGN;
2762 	}
2763 
2764 	if (m->m_len) {
2765 		bcopy(m->m_data, m->m_data + ETHER_ALIGN, m->m_len);
2766 		if (!sc->align_buf_len)
2767 			m->m_data += ETHER_ALIGN;
2768 	}
2769 
2770 	if (sc->align_buf_len) {
2771 		m->m_len += sc->align_buf_len;
2772 		bcopy(&sc->align_buf, m->m_data, sc->align_buf_len);
2773 	}
2774 
2775 	if (tmp_align_buf_len)
2776 		bcopy(&tmp_align_buf, &sc->align_buf, tmp_align_buf_len);
2777 
2778 	sc->align_buf_len = tmp_align_buf_len;
2779 }
2780 #endif /* __STRICT_ALIGNMENT */
2781 
2782 int
2783 em_rxfill(struct em_softc *sc)
2784 {
2785 	int post = 0;
2786 	int i;
2787 
2788 	i = sc->last_rx_desc_filled;
2789 
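	/*
	 * Post fresh mbufs into every empty slot after the last filled
	 * descriptor; return nonzero if anything was posted so the
	 * caller knows to advance the hardware tail pointer (RDT).
	 */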
2790 	while (sc->rx_ndescs < sc->num_rx_desc) {
2791 		if (++i == sc->num_rx_desc)
2792 			i = 0;
2793 
2794 		if (em_get_buf(sc, i) != 0)
2795 			break;
2796 
2797 		sc->last_rx_desc_filled = i;
2798 		post = 1;
2799 	}
2800 
2801 	return (post);
2802 }
2803 
2804 /*********************************************************************
2805  *
2806  *  This routine executes in interrupt context. It replenishes
2807  *  the mbufs in the descriptor ring and sends data which has been
2808  *  DMA'ed into host memory to the upper layer.
2809  *
2810  *  We loop at most count times if count is > 0, or until done if
2811  *  count < 0.
2812  *
2813  *********************************************************************/
2814 void
2815 em_rxeof(struct em_softc *sc, int count)
2816 {
2817 	struct ifnet	    *ifp = &sc->interface_data.ac_if;
2818 	struct mbuf	    *m;
2819 	u_int8_t	    accept_frame = 0;
2820 	u_int8_t	    eop = 0;
2821 	u_int16_t	    len, desc_len, prev_len_adj;
2822 	int		    i;
2823 
2824 	/* Pointer to the receive descriptor being examined. */
2825 	struct em_rx_desc   *desc;
2826 	struct em_buffer    *pkt;
2827 	u_int8_t	    status;
2828 
2829 	ifp = &sc->interface_data.ac_if;
2830 
2831 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2832 		return;
2833 
2834 	i = sc->next_rx_desc_to_check;
2835 
2836 	while (count != 0 && sc->rx_ndescs > 0) {
2837 		m = NULL;
2838 
2839 		desc = &sc->rx_desc_base[i];
2840 		pkt = &sc->rx_buffer_area[i];
2841 
2842 		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2843 		    sizeof(*desc) * i, sizeof(*desc),
2844 		    BUS_DMASYNC_POSTREAD);
2845 
2846 		status = desc->status;
2847 		if (!ISSET(status, E1000_RXD_STAT_DD)) {
2848 			bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2849 			    sizeof(*desc) * i, sizeof(*desc),
2850 			    BUS_DMASYNC_PREREAD);
2851 			break;
2852 		}
2853 
2854 		/* pull the mbuf off the ring */
2855 		bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
2856 		    BUS_DMASYNC_POSTREAD);
2857 		bus_dmamap_unload(sc->rxtag, pkt->map);
2858 		m = pkt->m_head;
2859 		pkt->m_head = NULL;
2860 
2861 		if (m == NULL) {
2862 			panic("em_rxeof: NULL mbuf in slot %d "
2863 			    "(nrx %d, filled %d)", i, sc->rx_ndescs,
2864 			    sc->last_rx_desc_filled);
2865 		}
2866 
2867 		m_cluncount(m, 1);
2868 		sc->rx_ndescs--;
2869 
2870 		accept_frame = 1;
2871 		prev_len_adj = 0;
2872 		desc_len = letoh16(desc->length);
2873 
2874 		if (status & E1000_RXD_STAT_EOP) {
2875 			count--;
2876 			eop = 1;
2877 			if (desc_len < ETHER_CRC_LEN) {
2878 				len = 0;
2879 				prev_len_adj = ETHER_CRC_LEN - desc_len;
2880 			} else if (sc->hw.mac_type == em_i350)
2881 				len = desc_len;
2882 			else
2883 				len = desc_len - ETHER_CRC_LEN;
2884 		} else {
2885 			eop = 0;
2886 			len = desc_len;
2887 		}
2888 
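		/*
		 * TBI (fiber) workaround: a frame that ends in a carrier
		 * extension symbol is flagged as errored even though the
		 * payload may be fine.  TBI_ACCEPT() re-checks the frame
		 * and, if it is acceptable, the bogus trailing byte is
		 * dropped by the len-- below.
		 */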
2889 		if (desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2890 			u_int8_t last_byte;
2891 			u_int32_t pkt_len = desc_len;
2892 
2893 			if (sc->fmp != NULL)
2894 				pkt_len += sc->fmp->m_pkthdr.len;
2895 
2896 			last_byte = *(mtod(m, caddr_t) + desc_len - 1);
2897 			if (TBI_ACCEPT(&sc->hw, status, desc->errors,
2898 			    pkt_len, last_byte)) {
2899 #ifndef SMALL_KERNEL
2900 				em_tbi_adjust_stats(&sc->hw, &sc->stats,
2901 				    pkt_len, sc->hw.mac_addr);
2902 #endif
2903 				if (len > 0)
2904 					len--;
2905 			} else
2906 				accept_frame = 0;
2907 		}
2908 
2909 		if (accept_frame) {
2910 			/* Assign correct length to the current fragment */
2911 			m->m_len = len;
2912 
2913 			em_realign(sc, m, &prev_len_adj); /* STRICT_ALIGN */
2914 
2915 			if (sc->fmp == NULL) {
2916 				m->m_pkthdr.len = m->m_len;
2917 				sc->fmp = m;	 /* Store the first mbuf */
2918 				sc->lmp = m;
2919 			} else {
2920 				/* Chain mbuf's together */
2921 				m->m_flags &= ~M_PKTHDR;
2922 				/*
2923 				 * Adjust length of previous mbuf in chain if
2924 				 * we received less than 4 bytes in the last
2925 				 * descriptor.
2926 				 */
2927 				if (prev_len_adj > 0) {
2928 					sc->lmp->m_len -= prev_len_adj;
2929 					sc->fmp->m_pkthdr.len -= prev_len_adj;
2930 				}
2931 				sc->lmp->m_next = m;
2932 				sc->lmp = m;
2933 				sc->fmp->m_pkthdr.len += m->m_len;
2934 			}
2935 
2936 			if (eop) {
2937 				ifp->if_ipackets++;
2938 
2939 				m = sc->fmp;
2940 				m->m_pkthdr.rcvif = ifp;
2941 
2942 				em_receive_checksum(sc, desc, m);
2943 #if NVLAN > 0
2944 				if (desc->status & E1000_RXD_STAT_VP) {
2945 					m->m_pkthdr.ether_vtag =
2946 					    letoh16(desc->special);
2947 					m->m_flags |= M_VLANTAG;
2948 				}
2949 #endif
2950 #if NBPFILTER > 0
2951 				if (ifp->if_bpf) {
2952 					bpf_mtap_ether(ifp->if_bpf, m,
2953 					    BPF_DIRECTION_IN);
2954 				}
2955 #endif
2956 
2957 				ether_input_mbuf(ifp, m);
2958 
2959 				sc->fmp = NULL;
2960 				sc->lmp = NULL;
2961 			}
2962 		} else {
2963 			sc->dropped_pkts++;
2964 
2965 			if (sc->fmp != NULL) {
2966 				m_freem(sc->fmp);
2967 				sc->fmp = NULL;
2968 				sc->lmp = NULL;
2969 			}
2970 
2971 			m_freem(m);
2972 		}
2973 
2974 		bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2975 		    sizeof(*desc) * i, sizeof(*desc),
2976 		    BUS_DMASYNC_PREREAD);
2977 
2978 		/* Advance our pointers to the next descriptor. */
2979 		if (++i == sc->num_rx_desc)
2980 			i = 0;
2981 	}
2982 	sc->next_rx_desc_to_check = i;
2983 }
2984 
2985 /*********************************************************************
2986  *
2987  *  Verify that the hardware indicated that the checksum is valid.
2988  *  Inform the stack about the status of checksum so that stack
2989  *  doesn't spend time verifying the checksum.
2990  *
2991  *********************************************************************/
2992 void
2993 em_receive_checksum(struct em_softc *sc, struct em_rx_desc *rx_desc,
2994     struct mbuf *mp)
2995 {
2996 	/* 82543 or newer only */
2997 	if ((sc->hw.mac_type < em_82543) ||
2998 	    /* Ignore Checksum bit is set */
2999 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3000 		mp->m_pkthdr.csum_flags = 0;
3001 		return;
3002 	}
3003 
3004 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3005 		/* Did it pass? */
3006 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3007 			/* IP Checksum Good */
3008 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
3009 
3010 		} else
3011 			mp->m_pkthdr.csum_flags = 0;
3012 	}
3013 
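	/*
	 * Only the TCPCS status bit is checked here; the driver claims
	 * both TCP and UDP checksums are good and lets the stack use
	 * whichever flag applies to the packet.
	 */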
3014 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3015 		/* Did it pass? */
3016 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE))
3017 			mp->m_pkthdr.csum_flags |=
3018 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3019 	}
3020 }
3021 
3022 /*
3023  * This turns on the hardware offload of VLAN
3024  * tag insertion and stripping.
3025  */
3026 void
3027 em_enable_hw_vlans(struct em_softc *sc)
3028 {
3029 	uint32_t ctrl;
3030 
3031 	ctrl = E1000_READ_REG(&sc->hw, CTRL);
3032 	ctrl |= E1000_CTRL_VME;
3033 	E1000_WRITE_REG(&sc->hw, CTRL, ctrl);
3034 }
3035 
3036 void
3037 em_enable_intr(struct em_softc *sc)
3038 {
3039 	E1000_WRITE_REG(&sc->hw, IMS, (IMS_ENABLE_MASK));
3040 }
3041 
3042 void
3043 em_disable_intr(struct em_softc *sc)
3044 {
3045 	/*
3046 	 * The first version of 82542 had an errata where when link
3047 	 * was forced it would stay up even if the cable was disconnected
3048 	 * Sequence errors were used to detect the disconnect and then
3049 	 * the driver would unforce the link.  This code is in the ISR.
3050 	 * For this to work correctly the Sequence error interrupt had
3051 	 * to be enabled all the time.
3052 	 */
3053 
3054 	if (sc->hw.mac_type == em_82542_rev2_0)
3055 		E1000_WRITE_REG(&sc->hw, IMC, (0xffffffff & ~E1000_IMC_RXSEQ));
3056 	else
3057 		E1000_WRITE_REG(&sc->hw, IMC, 0xffffffff);
3058 }
3059 
3060 int
3061 em_is_valid_ether_addr(u_int8_t *addr)
3062 {
3063 	const char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3064 
3065 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
3066 		return (FALSE);
3067 
3068 	return (TRUE);
3069 }
3070 
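/*
 * PCI configuration space is accessed in aligned 32-bit dwords, so the
 * two helpers below emulate the 16-bit accesses the shared code
 * expects by reading, or read-modify-writing, the containing dword.
 */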
3071 void
3072 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3073 {
3074 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3075 	pcireg_t val;
3076 
3077 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3078 	if (reg & 0x2) {
3079 		val &= 0x0000ffff;
3080 		val |= (*value << 16);
3081 	} else {
3082 		val &= 0xffff0000;
3083 		val |= *value;
3084 	}
3085 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg & ~0x3, val);
3086 }
3087 
3088 void
3089 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3090 {
3091 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3092 	pcireg_t val;
3093 
3094 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3095 	if (reg & 0x2)
3096 		*value = (val >> 16) & 0xffff;
3097 	else
3098 		*value = val & 0xffff;
3099 }
3100 
3101 void
3102 em_pci_set_mwi(struct em_hw *hw)
3103 {
3104 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3105 
3106 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3107 		(hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE));
3108 }
3109 
3110 void
3111 em_pci_clear_mwi(struct em_hw *hw)
3112 {
3113 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3114 
3115 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3116 		(hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE));
3117 }
3118 
3119 /*
3120  * We may eventually really do this, but it's unnecessary
3121  * for now, so we just return unsupported.
3122  */
3123 int32_t
3124 em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3125 {
3126 	return -E1000_NOT_IMPLEMENTED;
3127 }
3128 
3129 /*********************************************************************
3130 * 82544 coexistence issue workaround.
3131 *    There are two issues:
3132 *       1. Transmit hang.
3133 *    To detect this issue, the following equation can be used:
3134 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3135 *          If SUM[3:0] is between 1 and 4, we will have this issue.
3136 *
3137 *       2. DAC (Dual Address Cycle, 64-bit DMA addressing).
3138 *    To detect this issue, the following equation can be used:
3139 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3140 *          If SUM[3:0] is between 9 and c, we will have this issue.
3141 *
3142 *
3143 *    WORKAROUND:
3144 *          Make sure the buffer's ending address is not 1, 2, 3 or 4
3145 *          (hang), nor 9, a, b or c (DAC).
3146 **********************************************************************/
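/*
 * safe_terminator below is (ADDR[2:0] + SIZE[3:0]) & 0xF, i.e. the
 * SUM[3:0] from the equations above; when it falls in one of the
 * problem ranges, the final 4 bytes of the buffer are split into a
 * descriptor of their own.
 */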
3147 u_int32_t
3148 em_fill_descriptors(u_int64_t address, u_int32_t length,
3149     PDESC_ARRAY desc_array)
3150 {
3151 	u_int32_t safe_terminator;
3152 
3153 	/* Very short buffers are always safe to send as one descriptor. */
3154 	if (length <= 4) {
3155 		desc_array->descriptor[0].address = address;
3156 		desc_array->descriptor[0].length = length;
3157 		desc_array->elements = 1;
3158 		return (desc_array->elements);
3159 	}
3160 	safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) +
3161 	    (length & 0xF)) & 0xF);
3162 	/* If it is not in 0x1-0x4 or 0x9-0xC, send the buffer as is. */
3163 	if (safe_terminator == 0 ||
3164 	    (safe_terminator > 4 && safe_terminator < 9) ||
3165 	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
3166 		desc_array->descriptor[0].address = address;
3167 		desc_array->descriptor[0].length = length;
3168 		desc_array->elements = 1;
3169 		return (desc_array->elements);
3170 	}
3171 
3172 	/* Split the last 4 bytes into a second descriptor. */
3173 	desc_array->descriptor[0].address = address;
3174 	desc_array->descriptor[0].length = length - 4;
3175 	desc_array->descriptor[1].address = address + (length - 4);
3176 	desc_array->descriptor[1].length = 4;
3177 	desc_array->elements = 2;
3178 	return (desc_array->elements);
3179 }
3180 
3181 #ifndef SMALL_KERNEL
3182 /**********************************************************************
3183  *
3184  *  Update the board statistics counters.
3185  *
3186  **********************************************************************/
3187 void
3188 em_update_stats_counters(struct em_softc *sc)
3189 {
3190 	struct ifnet   *ifp;
3191 
3192 	if (sc->hw.media_type == em_media_type_copper ||
3193 	    (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU)) {
3194 		sc->stats.symerrs += E1000_READ_REG(&sc->hw, SYMERRS);
3195 		sc->stats.sec += E1000_READ_REG(&sc->hw, SEC);
3196 	}
3197 	sc->stats.crcerrs += E1000_READ_REG(&sc->hw, CRCERRS);
3198 	sc->stats.mpc += E1000_READ_REG(&sc->hw, MPC);
3199 	sc->stats.scc += E1000_READ_REG(&sc->hw, SCC);
3200 	sc->stats.ecol += E1000_READ_REG(&sc->hw, ECOL);
3201 
3202 	sc->stats.mcc += E1000_READ_REG(&sc->hw, MCC);
3203 	sc->stats.latecol += E1000_READ_REG(&sc->hw, LATECOL);
3204 	sc->stats.colc += E1000_READ_REG(&sc->hw, COLC);
3205 	sc->stats.dc += E1000_READ_REG(&sc->hw, DC);
3206 	sc->stats.rlec += E1000_READ_REG(&sc->hw, RLEC);
3207 	sc->stats.xonrxc += E1000_READ_REG(&sc->hw, XONRXC);
3208 	sc->stats.xontxc += E1000_READ_REG(&sc->hw, XONTXC);
3209 	sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, XOFFRXC);
3210 	sc->stats.xofftxc += E1000_READ_REG(&sc->hw, XOFFTXC);
3211 	sc->stats.fcruc += E1000_READ_REG(&sc->hw, FCRUC);
3212 	sc->stats.prc64 += E1000_READ_REG(&sc->hw, PRC64);
3213 	sc->stats.prc127 += E1000_READ_REG(&sc->hw, PRC127);
3214 	sc->stats.prc255 += E1000_READ_REG(&sc->hw, PRC255);
3215 	sc->stats.prc511 += E1000_READ_REG(&sc->hw, PRC511);
3216 	sc->stats.prc1023 += E1000_READ_REG(&sc->hw, PRC1023);
3217 	sc->stats.prc1522 += E1000_READ_REG(&sc->hw, PRC1522);
3218 	sc->stats.gprc += E1000_READ_REG(&sc->hw, GPRC);
3219 	sc->stats.bprc += E1000_READ_REG(&sc->hw, BPRC);
3220 	sc->stats.mprc += E1000_READ_REG(&sc->hw, MPRC);
3221 	sc->stats.gptc += E1000_READ_REG(&sc->hw, GPTC);
3222 
3223 	/* For the 64-bit byte counters the low dword must be read first. */
3224 	/* Both registers clear on the read of the high dword */
3225 
3226 	sc->stats.gorcl += E1000_READ_REG(&sc->hw, GORCL);
3227 	sc->stats.gorch += E1000_READ_REG(&sc->hw, GORCH);
3228 	sc->stats.gotcl += E1000_READ_REG(&sc->hw, GOTCL);
3229 	sc->stats.gotch += E1000_READ_REG(&sc->hw, GOTCH);
3230 
3231 	sc->stats.rnbc += E1000_READ_REG(&sc->hw, RNBC);
3232 	sc->stats.ruc += E1000_READ_REG(&sc->hw, RUC);
3233 	sc->stats.rfc += E1000_READ_REG(&sc->hw, RFC);
3234 	sc->stats.roc += E1000_READ_REG(&sc->hw, ROC);
3235 	sc->stats.rjc += E1000_READ_REG(&sc->hw, RJC);
3236 
3237 	sc->stats.torl += E1000_READ_REG(&sc->hw, TORL);
3238 	sc->stats.torh += E1000_READ_REG(&sc->hw, TORH);
3239 	sc->stats.totl += E1000_READ_REG(&sc->hw, TOTL);
3240 	sc->stats.toth += E1000_READ_REG(&sc->hw, TOTH);
3241 
3242 	sc->stats.tpr += E1000_READ_REG(&sc->hw, TPR);
3243 	sc->stats.tpt += E1000_READ_REG(&sc->hw, TPT);
3244 	sc->stats.ptc64 += E1000_READ_REG(&sc->hw, PTC64);
3245 	sc->stats.ptc127 += E1000_READ_REG(&sc->hw, PTC127);
3246 	sc->stats.ptc255 += E1000_READ_REG(&sc->hw, PTC255);
3247 	sc->stats.ptc511 += E1000_READ_REG(&sc->hw, PTC511);
3248 	sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, PTC1023);
3249 	sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, PTC1522);
3250 	sc->stats.mptc += E1000_READ_REG(&sc->hw, MPTC);
3251 	sc->stats.bptc += E1000_READ_REG(&sc->hw, BPTC);
3252 
3253 	if (sc->hw.mac_type >= em_82543) {
3254 		sc->stats.algnerrc +=
3255 		E1000_READ_REG(&sc->hw, ALGNERRC);
3256 		sc->stats.rxerrc +=
3257 		E1000_READ_REG(&sc->hw, RXERRC);
3258 		sc->stats.tncrs +=
3259 		E1000_READ_REG(&sc->hw, TNCRS);
3260 		sc->stats.cexterr +=
3261 		E1000_READ_REG(&sc->hw, CEXTERR);
3262 		sc->stats.tsctc +=
3263 		E1000_READ_REG(&sc->hw, TSCTC);
3264 		sc->stats.tsctfc +=
3265 		E1000_READ_REG(&sc->hw, TSCTFC);
3266 	}
3267 	ifp = &sc->interface_data.ac_if;
3268 
3269 	/* Fill out the OS statistics structure */
3270 	ifp->if_collisions = sc->stats.colc;
3271 
3272 	/* Rx Errors */
3273 	ifp->if_ierrors =
3274 	    sc->dropped_pkts +
3275 	    sc->stats.rxerrc +
3276 	    sc->stats.crcerrs +
3277 	    sc->stats.algnerrc +
3278 	    sc->stats.ruc + sc->stats.roc +
3279 	    sc->stats.mpc + sc->stats.cexterr +
3280 	    sc->rx_overruns;
3281 
3282 	/* Tx Errors */
3283 	ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol +
3284 	    sc->watchdog_events;
3285 }
3286 
3287 #ifdef EM_DEBUG
3288 /**********************************************************************
3289  *
3290  *  This routine is called only when IFF_DEBUG is enabled.
3291  *  This routine provides a way to take a look at important statistics
3292  *  maintained by the driver and hardware.
3293  *
3294  **********************************************************************/
3295 void
3296 em_print_hw_stats(struct em_softc *sc)
3297 {
3298 	const char * const unit = sc->sc_dv.dv_xname;
3299 
3300 	printf("%s: Excessive collisions = %lld\n", unit,
3301 		(long long)sc->stats.ecol);
3302 	printf("%s: Symbol errors = %lld\n", unit,
3303 		(long long)sc->stats.symerrs);
3304 	printf("%s: Sequence errors = %lld\n", unit,
3305 		(long long)sc->stats.sec);
3306 	printf("%s: Defer count = %lld\n", unit,
3307 		(long long)sc->stats.dc);
3308 
3309 	printf("%s: Missed Packets = %lld\n", unit,
3310 		(long long)sc->stats.mpc);
3311 	printf("%s: Receive No Buffers = %lld\n", unit,
3312 		(long long)sc->stats.rnbc);
3313 	/* RLEC is inaccurate on some hardware, calculate our own */
3314 	printf("%s: Receive Length Errors = %lld\n", unit,
3315 		((long long)sc->stats.roc +
3316 		(long long)sc->stats.ruc));
3317 	printf("%s: Receive errors = %lld\n", unit,
3318 		(long long)sc->stats.rxerrc);
3319 	printf("%s: Crc errors = %lld\n", unit,
3320 		(long long)sc->stats.crcerrs);
3321 	printf("%s: Alignment errors = %lld\n", unit,
3322 		(long long)sc->stats.algnerrc);
3323 	printf("%s: Carrier extension errors = %lld\n", unit,
3324 		(long long)sc->stats.cexterr);
3325 
3326 	printf("%s: RX overruns = %ld\n", unit,
3327 		sc->rx_overruns);
3328 	printf("%s: watchdog timeouts = %ld\n", unit,
3329 		sc->watchdog_events);
3330 
3331 	printf("%s: XON Rcvd = %lld\n", unit,
3332 		(long long)sc->stats.xonrxc);
3333 	printf("%s: XON Xmtd = %lld\n", unit,
3334 		(long long)sc->stats.xontxc);
3335 	printf("%s: XOFF Rcvd = %lld\n", unit,
3336 		(long long)sc->stats.xoffrxc);
3337 	printf("%s: XOFF Xmtd = %lld\n", unit,
3338 		(long long)sc->stats.xofftxc);
3339 
3340 	printf("%s: Good Packets Rcvd = %lld\n", unit,
3341 		(long long)sc->stats.gprc);
3342 	printf("%s: Good Packets Xmtd = %lld\n", unit,
3343 		(long long)sc->stats.gptc);
3344 }
3345 #endif
3346 #endif /* !SMALL_KERNEL */
3347