/**************************************************************************

Copyright (c) 2001-2003, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* $OpenBSD: if_em.c,v 1.287 2014/07/13 23:10:23 deraadt Exp $ */
/* $FreeBSD: if_em.c,v 1.46 2004/09/29 18:28:28 mlaier Exp $ */

#include <dev/pci/if_em.h>
#include <dev/pci/if_em_soc.h>

/*********************************************************************
 *  Driver version
 *********************************************************************/

#define EM_DRIVER_VERSION	"6.2.9"

/*********************************************************************
 *  PCI Device ID Table
 *********************************************************************/
const struct pci_matchid em_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_DPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_DPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_SPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_SPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR_K },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR_LP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FBR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573V_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QP_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_CU_ET2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_NF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_NF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_1GBPS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_2_5GBPS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_82567V_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_G },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_GT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_C },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_BM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_G },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_GT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_C },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_3 }
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
int  em_probe(struct device *, void *, void *);
void em_attach(struct device *, struct device *, void *);
void em_defer_attach(struct device *);
int  em_detach(struct device *, int);
int  em_activate(struct device *, int);
int  em_intr(void *);
void em_start(struct ifnet *);
int  em_ioctl(struct ifnet *, u_long, caddr_t);
void em_watchdog(struct ifnet *);
void em_init(void *);
void em_stop(void *, int);
void em_media_status(struct ifnet *, struct ifmediareq *);
int  em_media_change(struct ifnet *);
int  em_flowstatus(struct em_softc *);
void em_identify_hardware(struct em_softc *);
int  em_allocate_pci_resources(struct em_softc *);
void em_free_pci_resources(struct em_softc *);
void em_local_timer(void *);
int  em_hardware_init(struct em_softc *);
void em_setup_interface(struct em_softc *);
int  em_setup_transmit_structures(struct em_softc *);
void em_initialize_transmit_unit(struct em_softc *);
int  em_setup_receive_structures(struct em_softc *);
void em_initialize_receive_unit(struct em_softc *);
void em_enable_intr(struct em_softc *);
void em_disable_intr(struct em_softc *);
void em_free_transmit_structures(struct em_softc *);
void em_free_receive_structures(struct em_softc *);
void em_update_stats_counters(struct em_softc *);
void em_txeof(struct em_softc *);
int  em_allocate_receive_structures(struct em_softc *);
int  em_allocate_transmit_structures(struct em_softc *);
#ifdef __STRICT_ALIGNMENT
void em_realign(struct em_softc *, struct mbuf *, u_int16_t *);
#else
#define em_realign(a, b, c) /* a, b, c */
#endif
int  em_rxfill(struct em_softc *);
void em_rxeof(struct em_softc *);
void em_receive_checksum(struct em_softc *, struct em_rx_desc *,
			 struct mbuf *);
void em_transmit_checksum_setup(struct em_softc *, struct mbuf *,
				u_int32_t *, u_int32_t *);
void em_iff(struct em_softc *);
#ifdef EM_DEBUG
void em_print_hw_stats(struct em_softc *);
#endif
void em_update_link_status(struct em_softc *);
int  em_get_buf(struct em_softc *, int);
void em_enable_hw_vlans(struct em_softc *);
int  em_encap(struct em_softc *, struct mbuf *);
void em_smartspeed(struct em_softc *);
int  em_82547_fifo_workaround(struct em_softc *, int);
void em_82547_update_fifo_head(struct em_softc *, int);
int  em_82547_tx_fifo_reset(struct em_softc *);
void em_82547_move_tail(void *arg);
void em_82547_move_tail_locked(struct em_softc *);
int  em_dma_malloc(struct em_softc *, bus_size_t, struct em_dma_alloc *,
		   int);
void em_dma_free(struct em_softc *, struct em_dma_alloc *);
u_int32_t em_fill_descriptors(u_int64_t address, u_int32_t length,
			      PDESC_ARRAY desc_array);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

struct cfattach em_ca = {
	sizeof(struct em_softc), em_probe, em_attach, em_detach,
	em_activate
};

struct cfdriver em_cd = {
	NULL, "em", DV_IFNET
};

static int em_smart_pwr_down = FALSE;

/*********************************************************************
 *  Device identification routine
 *
 *  em_probe determines if the driver should be loaded on an
 *  adapter, based on the PCI vendor/device ID of the adapter.
 *
 *  return 0 on no match, positive on match
 *********************************************************************/

int
em_probe(struct device *parent, void *match, void *aux)
{
	INIT_DEBUGOUT("em_probe: begin");

	return (pci_matchbyid((struct pci_attach_args *)aux, em_devices,
	    nitems(em_devices)));
}

void
em_defer_attach(struct device *self)
{
	struct em_softc *sc = (struct em_softc *)self;
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;
	void *gcu;

	if ((gcu = em_lookup_gcu(self)) == NULL) {
		printf("%s: No GCU found, deferred attachment failed\n",
		    sc->sc_dv.dv_xname);

		if (sc->sc_intrhand)
			pci_intr_disestablish(pc, sc->sc_intrhand);
		sc->sc_intrhand = 0;

		em_stop(sc, 1);

		em_free_pci_resources(sc);
		em_dma_free(sc, &sc->rxdma);
		em_dma_free(sc, &sc->txdma);

		return;
	}

	sc->hw.gcu = gcu;

	em_attach_miibus(self);

	em_setup_interface(sc);

	em_update_link_status(sc);

	em_setup_link(&sc->hw);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *********************************************************************/

void
em_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct em_softc *sc;
	int tsize, rsize;
	int defer = 0;

	INIT_DEBUGOUT("em_attach: begin");

	sc = (struct em_softc *)self;
	sc->osdep.em_pa = *pa;

	timeout_set(&sc->timer_handle, em_local_timer, sc);
	timeout_set(&sc->tx_fifo_timer_handle, em_82547_move_tail, sc);

	/* Determine hardware revision */
	em_identify_hardware(sc);

	/*
	 * Only use MSI on the newer PCIe parts, with the exception
	 * of 82571/82572 due to "Byte Enables 2 and 3 Are Not Set" errata
	 */
	if (sc->hw.mac_type <= em_82572)
		sc->osdep.em_pa.pa_flags &= ~PCI_FLAGS_MSI_ENABLED;

	/* Parameters (to be read from user) */
	if (sc->hw.mac_type >= em_82544) {
		sc->num_tx_desc = EM_MAX_TXD;
		sc->num_rx_desc = EM_MAX_RXD;
	} else {
		sc->num_tx_desc = EM_MAX_TXD_82543;
		sc->num_rx_desc = EM_MAX_RXD_82543;
	}
	sc->tx_int_delay = EM_TIDV;
	sc->tx_abs_int_delay = EM_TADV;
	sc->rx_int_delay = EM_RDTR;
	sc->rx_abs_int_delay = EM_RADV;
	sc->hw.autoneg = DO_AUTO_NEG;
	sc->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	sc->hw.tbi_compatibility_en = TRUE;
	sc->rx_buffer_len = EM_RXBUFFER_2048;

	sc->hw.phy_init_script = 1;
	sc->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	sc->hw.master_slave = em_ms_hw_default;
#else
	sc->hw.master_slave = EM_MASTER_SLAVE;
#endif

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	sc->hw.report_tx_early = 1;

	if (em_allocate_pci_resources(sc))
		goto err_pci;

	/* Initialize eeprom parameters */
	em_init_eeprom_params(&sc->hw);

	/*
	 * Set the max frame size assuming standard Ethernet
	 * sized frames.
	 */
	switch (sc->hw.mac_type) {
		case em_82573:
		{
			uint16_t	eeprom_data = 0;

			/*
			 * 82573 only supports Jumbo frames
			 * if ASPM is disabled.
			 */
			em_read_eeprom(&sc->hw, EEPROM_INIT_3GIO_3,
			    1, &eeprom_data);
			if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
				sc->hw.max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames */
			/* FALLTHROUGH */
		}
		case em_82571:
		case em_82572:
		case em_82574:
		case em_82575:
		case em_82580:
		case em_i210:
		case em_i350:
		case em_ich9lan:
		case em_ich10lan:
		case em_80003es2lan:
			/* Limit Jumbo Frame size */
			sc->hw.max_frame_size = 9234;
			break;
		case em_pchlan:
			sc->hw.max_frame_size = 4096;
			break;
		case em_82542_rev2_0:
		case em_82542_rev2_1:
		case em_ich8lan:
			/* Adapters that do not support Jumbo frames */
			sc->hw.max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			sc->hw.max_frame_size =
			    MAX_JUMBO_FRAME_SIZE;
	}

	sc->hw.min_frame_size =
	    ETHER_MIN_LEN + ETHER_CRC_LEN;

	if (sc->hw.mac_type >= em_82544)
	    tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
		EM_MAX_TXD * sizeof(struct em_tx_desc));
	else
	    tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
		EM_MAX_TXD_82543 * sizeof(struct em_tx_desc));
	tsize = EM_ROUNDUP(tsize, PAGE_SIZE);
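
	/*
	 * Sizing sketch (illustrative, hypothetical numbers): each legacy
	 * descriptor is 16 bytes, so a hypothetical ring of 256 descriptors
	 * needs 256 * 16 = 4096 bytes.  The first EM_ROUNDUP pads the
	 * allocation out to the per-chip maximum ring size; the second
	 * rounds to a PAGE_SIZE multiple, e.g. with PAGE_SIZE of 4096:
	 *
	 *	tsize = EM_ROUNDUP(256 * 16, EM_MAX_TXD * 16);
	 *	tsize = EM_ROUNDUP(tsize, 4096);
	 *
	 * so the descriptor ring handed to em_dma_malloc() below always
	 * starts page aligned.
	 */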

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate tx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_tx_desc;
	}
	sc->tx_desc_base = (struct em_tx_desc *)sc->txdma.dma_vaddr;

	if (sc->hw.mac_type >= em_82544)
	    rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
		EM_MAX_RXD * sizeof(struct em_rx_desc));
	else
	    rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
		EM_MAX_RXD_82543 * sizeof(struct em_rx_desc));
	rsize = EM_ROUNDUP(rsize, PAGE_SIZE);

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_rx_desc;
	}
	sc->rx_desc_base = (struct em_rx_desc *) sc->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if ((defer = em_hardware_init(sc))) {
		if (defer == EAGAIN)
			config_defer(self, em_defer_attach);
		else {
			printf("%s: Unable to initialize the hardware\n",
			    sc->sc_dv.dv_xname);
			goto err_hw_init;
		}
	}

	if (sc->hw.mac_type == em_80003es2lan || sc->hw.mac_type == em_82575 ||
	    sc->hw.mac_type == em_82580 || sc->hw.mac_type == em_i210 ||
	    sc->hw.mac_type == em_i350) {
		uint32_t reg = EM_READ_REG(&sc->hw, E1000_STATUS);
		sc->hw.bus_func = (reg & E1000_STATUS_FUNC_MASK) >>
		    E1000_STATUS_FUNC_SHIFT;

		switch (sc->hw.bus_func) {
		case 0:
			sc->hw.swfw = E1000_SWFW_PHY0_SM;
			break;
		case 1:
			sc->hw.swfw = E1000_SWFW_PHY1_SM;
			break;
		case 2:
			sc->hw.swfw = E1000_SWFW_PHY2_SM;
			break;
		case 3:
			sc->hw.swfw = E1000_SWFW_PHY3_SM;
			break;
		}
	} else {
		sc->hw.bus_func = 0;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&sc->hw) < 0) {
		printf("%s: EEPROM read error while reading mac address\n",
		       sc->sc_dv.dv_xname);
		goto err_mac_addr;
	}

	bcopy(sc->hw.mac_addr, sc->interface_data.ac_enaddr,
	    ETHER_ADDR_LEN);

	/* Setup OS specific network interface */
	if (!defer)
		em_setup_interface(sc);

	/* Initialize statistics */
	em_clear_hw_cntrs(&sc->hw);
#ifndef SMALL_KERNEL
	em_update_stats_counters(sc);
#endif
	sc->hw.get_link_status = 1;
	if (!defer)
		em_update_link_status(sc);

	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

	/* Indicate SOL/IDER usage */
	if (em_check_phy_reset_block(&sc->hw))
		printf("%s: PHY reset is blocked due to SOL/IDER session.\n",
		    sc->sc_dv.dv_xname);

	/* Identify 82544 on PCI-X */
	em_get_bus_info(&sc->hw);
	if (sc->hw.bus_type == em_bus_type_pcix &&
	    sc->hw.mac_type == em_82544)
		sc->pcix_82544 = TRUE;
	else
		sc->pcix_82544 = FALSE;

	sc->hw.icp_xxxx_is_link_up = FALSE;

	INIT_DEBUGOUT("em_attach: end");
	return;

err_mac_addr:
err_hw_init:
	em_dma_free(sc, &sc->rxdma);
err_rx_desc:
	em_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
	em_free_pci_resources(sc);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  em_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

void
em_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	struct em_softc *sc = ifp->if_softc;
	int		post = 0;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	if (!sc->link_active)
		return;

	if (sc->hw.mac_type != em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (em_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
		/* Send a copy of the frame to the BPF listener */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = EM_TX_TIMEOUT;

		post = 1;
	}

	if (sc->hw.mac_type != em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Advance the Transmit Descriptor Tail (Tdt); this
		 * tells the E1000 that this frame is available to
		 * transmit.
		 */
		if (post)
			E1000_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int		error = 0;
	struct ifreq   *ifr = (struct ifreq *) data;
	struct ifaddr  *ifa = (struct ifaddr *)data;
	struct em_softc *sc = ifp->if_softc;
	int s;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
			       "Addr)");
		if (!(ifp->if_flags & IFF_UP)) {
			ifp->if_flags |= IFF_UP;
			em_init(sc);
		}
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->interface_data, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				em_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				em_stop(sc, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (em_check_phy_reset_block(&sc->hw)) {
			printf("%s: Media change is blocked due to SOL/IDER session.\n",
			    sc->sc_dv.dv_xname);
			break;
		}
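		/* FALLTHROUGH */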
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->interface_data, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			em_disable_intr(sc);
			em_iff(sc);
			if (sc->hw.mac_type == em_82542_rev2_0)
				em_initialize_receive_unit(sc);
			em_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

void
em_watchdog(struct ifnet *ifp)
{
	struct em_softc *sc = ifp->if_softc;

	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		return;
	}
	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);

	em_init(sc);

	sc->watchdog_events++;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 **********************************************************************/

void
em_init(void *arg)
{
	struct em_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	uint32_t	pba;
	int s;

	s = splnet();

	INIT_DEBUGOUT("em_init: begin");

	em_stop(sc, 0);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: the default does not leave enough room for Jumbo Frames >10k.
	 */
	switch (sc->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		sc->tx_fifo_head = 0;
		sc->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		sc->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571:
	case em_82572: /* Total Packet Buffer on these is 48k */
	case em_82575:
	case em_82580:
	case em_80003es2lan:
	case em_i350:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_i210:
		pba = E1000_PBA_34K;
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case em_82574: /* Total Packet Buffer is 40k */
		pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		break;
	case em_ich8lan:
		pba = E1000_PBA_8K;
		break;
	case em_ich9lan:
	case em_ich10lan:
		pba = E1000_PBA_10K;
		break;
	case em_pchlan:
	case em_pch2lan:
	case em_pch_lpt:
		pba = E1000_PBA_26K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K.   */
		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
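
	/*
	 * Worked example (hypothetical configuration): an 82547 with
	 * max_frame_size <= 8192 takes pba = E1000_PBA_30K above, giving
	 * Rx 30K of the 40K packet buffer.  The 82547 case also records
	 * the leftover 10K for the Tx FIFO workaround:
	 *
	 *	tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
	 *
	 * assuming EM_PBA_BYTES_SHIFT converts the KB-granularity PBA
	 * units into bytes.
	 */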
	INIT_DEBUGOUT1("em_init: pba=%dK", pba);
	E1000_WRITE_REG(&sc->hw, PBA, pba);

	/* Get the latest MAC address; the user can use a LAA */
	bcopy(sc->interface_data.ac_enaddr, sc->hw.mac_addr,
	      ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		splx(s);
		return;
	}
	em_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, VET, ETHERTYPE_VLAN);
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		em_enable_hw_vlans(sc);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		       sc->sc_dv.dv_xname);
		em_stop(sc, 0);
		splx(s);
		return;
	}
	em_initialize_transmit_unit(sc);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		       sc->sc_dv.dv_xname);
		em_stop(sc, 0);
		splx(s);
		return;
	}
	em_initialize_receive_unit(sc);

	/* Program promiscuous mode and multicast filters. */
	em_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	timeout_add_sec(&sc->timer_handle, 1);
	em_clear_hw_cntrs(&sc->hw);
	em_enable_intr(sc);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy_reset_disable = TRUE;

	splx(s);
}

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/
int
em_intr(void *arg)
{
	struct em_softc	*sc = arg;
	struct ifnet	*ifp = &sc->interface_data.ac_if;
	u_int32_t	reg_icr, test_icr;
	int		refill = 0;

	test_icr = reg_icr = E1000_READ_REG(&sc->hw, ICR);
	if (sc->hw.mac_type >= em_82571)
		test_icr = (reg_icr & E1000_ICR_INT_ASSERTED);
	if (!test_icr)
		return (0);
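
	/*
	 * Reading ICR clears it, so the register is sampled exactly once
	 * per interrupt.  On 82571 and newer parts the INT_ASSERTED bit
	 * indicates the interrupt really belongs to this device, which
	 * matters on shared interrupt lines.
	 */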

	if (ifp->if_flags & IFF_RUNNING) {
		em_rxeof(sc);
		em_txeof(sc);
		refill = 1;
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		timeout_del(&sc->timer_handle);
		sc->hw.get_link_status = 1;
		em_check_for_link(&sc->hw);
		em_update_link_status(sc);
		timeout_add_sec(&sc->timer_handle, 1);
	}

	if (reg_icr & E1000_ICR_RXO) {
		sc->rx_overruns++;
		refill = 1;
	}

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		em_start(ifp);

	if (refill && em_rxfill(sc)) {
		/* Advance the Rx Queue #0 "Tail Pointer". */
		E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
	}

	return (1);
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct em_softc *sc = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;
	u_int16_t gsr;

	INIT_DEBUGOUT("em_media_status: begin");

	em_check_for_link(&sc->hw);
	em_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.media_type == em_media_type_fiber ||
	    sc->hw.media_type == em_media_type_internal_serdes) {
		if (sc->hw.mac_type == em_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}

		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= em_flowstatus(sc) | IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_1000_T) {
			em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &gsr);
			if (gsr & SR_1000T_MS_CONFIG_RES)
				ifmr->ifm_active |= IFM_ETH_MASTER;
		}
	}
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options of ifconfig.
 *
 **********************************************************************/
int
em_media_change(struct ifnet *ifp)
{
	struct em_softc *sc = ifp->if_softc;
	struct ifmedia	*ifm = &sc->media;

	INIT_DEBUGOUT("em_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.autoneg = DO_AUTO_NEG;
		sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.autoneg = DO_AUTO_NEG;
		sc->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		sc->hw.autoneg = FALSE;
		sc->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.forced_speed_duplex = em_100_full;
		else
			sc->hw.forced_speed_duplex = em_100_half;
		break;
	case IFM_10_T:
		sc->hw.autoneg = FALSE;
		sc->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.forced_speed_duplex = em_10_full;
		else
			sc->hw.forced_speed_duplex = em_10_half;
		break;
	default:
		printf("%s: Unsupported media type\n", sc->sc_dv.dv_xname);
	}

	/*
	 * As the speed/duplex settings may have changed, we need to
	 * reset the PHY.
	 */
	sc->hw.phy_reset_disable = FALSE;

	em_init(sc);

	return (0);
}

int
em_flowstatus(struct em_softc *sc)
{
	u_int16_t ar, lpar;

	if (sc->hw.media_type == em_media_type_fiber ||
	    sc->hw.media_type == em_media_type_internal_serdes)
		return (0);

	em_read_phy_reg(&sc->hw, PHY_AUTONEG_ADV, &ar);
	em_read_phy_reg(&sc->hw, PHY_LP_ABILITY, &lpar);

	if ((ar & NWAY_AR_PAUSE) && (lpar & NWAY_LPAR_PAUSE))
		return (IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE);
	else if (!(ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
		(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
		return (IFM_FLOW|IFM_ETH_TXPAUSE);
	else if ((ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
		!(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
		return (IFM_FLOW|IFM_ETH_RXPAUSE);

	return (0);
}
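
/*
 * The pause resolution in em_flowstatus() follows IEEE 802.3 Annex 28B:
 * with our advertisement (ar) and the link partner's (lpar),
 *
 *	PAUSE	ASM_DIR		LP PAUSE	LP ASM_DIR	result
 *	1	x		1		x		Tx+Rx pause
 *	0	1		1		1		Tx pause only
 *	1	1		0		1		Rx pause only
 *
 * and any other combination resolves to no flow control.
 */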

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
int
em_encap(struct em_softc *sc, struct mbuf *m_head)
{
	u_int32_t	txd_upper;
	u_int32_t	txd_lower, txd_used = 0, txd_saved = 0;
	int		i, j, first, error = 0, last = 0;
	bus_dmamap_t	map;

	/* For 82544 Workaround */
	DESC_ARRAY		desc_array;
	u_int32_t		array_elements;
	u_int32_t		counter;

	struct em_buffer   *tx_buffer, *tx_buffer_mapped;
	struct em_tx_desc *current_tx_desc = NULL;

	/*
	 * Force a cleanup if the number of available TX descriptors
	 * hits the threshold
	 */
	if (sc->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_txeof(sc);
		/* Do we now have at least a minimal number available? */
		if (sc->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			sc->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	/*
	 * Map the packet for DMA.
	 *
	 * Capture the first descriptor index;
	 * this descriptor will have the index
	 * of the EOP, which is the only one that
	 * now gets a DONE bit writeback.
	 */
	first = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf(sc->txtag, map, m_head, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG:
		if ((error = m_defrag(m_head, M_DONTWAIT)) == 0 &&
		    (error = bus_dmamap_load_mbuf(sc->txtag, map, m_head,
		     BUS_DMA_NOWAIT)) == 0)
			break;

		/* FALLTHROUGH */
	default:
		sc->no_tx_dma_setup++;
		goto loaderr;
	}

	EM_KASSERT(map->dm_nsegs != 0, ("em_encap: empty packet"));

	if (map->dm_nsegs > sc->num_tx_desc_avail - 2)
		goto fail;

	if (sc->hw.mac_type >= em_82543 && sc->hw.mac_type != em_82575 &&
	    sc->hw.mac_type != em_82580 && sc->hw.mac_type != em_i210 &&
	    sc->hw.mac_type != em_i350)
		em_transmit_checksum_setup(sc, m_head, &txd_upper, &txd_lower);
	else
		txd_upper = txd_lower = 0;

	i = sc->next_avail_tx_desc;
	if (sc->pcix_82544)
		txd_saved = i;

	for (j = 0; j < map->dm_nsegs; j++) {
		/* If sc is 82544 and on PCI-X bus */
		if (sc->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(map->dm_segs[j].ds_addr,
							     map->dm_segs[j].ds_len,
							     &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == sc->num_tx_desc_avail) {
					sc->next_avail_tx_desc = txd_saved;
					goto fail;
				}
				tx_buffer = &sc->tx_buffer_area[i];
				current_tx_desc = &sc->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					(sc->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32((txd_upper));
				last = i;
				if (++i == sc->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			tx_buffer = &sc->tx_buffer_area[i];
			current_tx_desc = &sc->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(map->dm_segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				sc->txd_cmd | txd_lower | map->dm_segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);
			last = i;
			if (++i == sc->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	sc->next_avail_tx_desc = i;
	if (sc->pcix_82544)
		sc->num_tx_desc_avail -= txd_used;
	else
		sc->num_tx_desc_avail -= map->dm_nsegs;

#if NVLAN > 0
	/* Find out if we are in VLAN mode */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the VLAN id */
		current_tx_desc->upper.fields.special =
			htole16(m_head->m_pkthdr.ether_vtag);

		/* Tell hardware to add tag */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}
#endif

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	current_tx_desc->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);

	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &sc->tx_buffer_area[first];
	tx_buffer->next_eop = last;

	/*
	 * Advance the Transmit Descriptor Tail (Tdt); this tells the
	 * E1000 that this frame is available to transmit.
	 */
	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		if (sc->link_duplex == HALF_DUPLEX)
			em_82547_move_tail_locked(sc);
		else {
			E1000_WRITE_REG(&sc->hw, TDT, i);
			em_82547_update_fifo_head(sc, m_head->m_pkthdr.len);
		}
	}

	return (0);

fail:
	sc->no_tx_desc_avail2++;
	bus_dmamap_unload(sc->txtag, map);
	error = ENOBUFS;
loaderr:
	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (error);
}

/*********************************************************************
 *
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 * in this case. We do that only when the FIFO is quiescent.
 *
 **********************************************************************/
void
em_82547_move_tail_locked(struct em_softc *sc)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	hw_tdt = E1000_READ_REG(&sc->hw, TDT);
	sw_tdt = sc->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &sc->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == sc->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(sc, length)) {
				sc->tx_fifo_wrk_cnt++;
				timeout_add(&sc->tx_fifo_timer_handle, 1);
				break;
			}
			E1000_WRITE_REG(&sc->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(sc, length);
			length = 0;
		}
	}
}

void
em_82547_move_tail(void *arg)
{
	struct em_softc *sc = arg;
	int s;

	s = splnet();
	em_82547_move_tail_locked(sc);
	splx(s);
}

int
em_82547_fifo_workaround(struct em_softc *sc, int len)
{
	int fifo_space, fifo_pkt_len;

	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (sc->link_duplex == HALF_DUPLEX) {
		fifo_space = sc->tx_fifo_size - sc->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(sc))
				return (0);
			else
				return (1);
		}
	}

	return (0);
}
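
/*
 * Worked example (hypothetical numbers): assuming EM_FIFO_HDR is 16, a
 * 1000-byte frame becomes fifo_pkt_len = EM_ROUNDUP(1016, 16) = 1024.
 * In half duplex, once the contiguous space left before the FIFO wrap
 * point drops below fifo_pkt_len less the EM_82547_PKT_THRESH slack,
 * the frame would straddle the ring boundary, so the function above
 * attempts a FIFO reset instead; while the FIFO is not yet quiescent it
 * returns nonzero and em_82547_move_tail_locked() re-arms a timeout to
 * retry later.
 */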

void
em_82547_update_fifo_head(struct em_softc *sc, int len)
{
	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	sc->tx_fifo_head += fifo_pkt_len;
	if (sc->tx_fifo_head >= sc->tx_fifo_size)
		sc->tx_fifo_head -= sc->tx_fifo_size;
}

int
em_82547_tx_fifo_reset(struct em_softc *sc)
{
	uint32_t tctl;

	if ((E1000_READ_REG(&sc->hw, TDT) ==
	     E1000_READ_REG(&sc->hw, TDH)) &&
	    (E1000_READ_REG(&sc->hw, TDFT) ==
	     E1000_READ_REG(&sc->hw, TDFH)) &&
	    (E1000_READ_REG(&sc->hw, TDFTS) ==
	     E1000_READ_REG(&sc->hw, TDFHS)) &&
	    (E1000_READ_REG(&sc->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&sc->hw, TCTL);
		E1000_WRITE_REG(&sc->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&sc->hw, TDFT, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFH, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFTS, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFHS, sc->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&sc->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&sc->hw);

		sc->tx_fifo_head = 0;
		sc->tx_fifo_reset_cnt++;

		return (TRUE);
	} else
		return (FALSE);
}

void
em_iff(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;
	struct arpcom *ac = &sc->interface_data;
	u_int32_t reg_rctl = 0;
	u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	struct ether_multi *enm;
	struct ether_multistep step;
	int i = 0;

	IOCTL_DEBUGOUT("em_iff: begin");

	if (sc->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_clear_mwi(&sc->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
	reg_rctl &= ~(E1000_RCTL_MPE | E1000_RCTL_UPE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg_rctl |= E1000_RCTL_MPE;
		if (ifp->if_flags & IFF_PROMISC)
			reg_rctl |= E1000_RCTL_UPE;
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, mta + i, ETH_LENGTH_OF_ADDRESS);
			i += ETH_LENGTH_OF_ADDRESS;

			ETHER_NEXT_MULTI(step, enm);
		}

		em_mc_addr_list_update(&sc->hw, mta, ac->ac_multicnt, 0, 1);
	}

	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);

	if (sc->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_set_mwi(&sc->hw);
	}
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

void
em_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct em_softc *sc = arg;
	int s;

	ifp = &sc->interface_data.ac_if;

	s = splnet();

	em_check_for_link(&sc->hw);
	em_update_link_status(sc);
#ifndef SMALL_KERNEL
	em_update_stats_counters(sc);
#ifdef EM_DEBUG
	if (ifp->if_flags & IFF_DEBUG && ifp->if_flags & IFF_RUNNING)
		em_print_hw_stats(sc);
#endif
#endif
	em_smartspeed(sc);

	timeout_add_sec(&sc->timer_handle, 1);

	splx(s);
}

void
em_update_link_status(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;

	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
		if (sc->link_active == 0) {
			em_get_speed_and_duplex(&sc->hw,
						&sc->link_speed,
						&sc->link_duplex);
			/* Check if we may set SPEED_MODE bit on PCI-E */
			if ((sc->link_speed == SPEED_1000) &&
			    ((sc->hw.mac_type == em_82571) ||
			    (sc->hw.mac_type == em_82572) ||
			    (sc->hw.mac_type == em_82575) ||
			    (sc->hw.mac_type == em_82580))) {
				int tarc0;

				tarc0 = E1000_READ_REG(&sc->hw, TARC0);
				tarc0 |= SPEED_MODE_BIT;
				E1000_WRITE_REG(&sc->hw, TARC0, tarc0);
			}
			sc->link_active = 1;
			sc->smartspeed = 0;
			ifp->if_baudrate = IF_Mbps(sc->link_speed);
		}
		if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
			if (sc->link_duplex == FULL_DUPLEX)
				ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			else
				ifp->if_link_state = LINK_STATE_HALF_DUPLEX;
			if_link_state_change(ifp);
		}
	} else {
		if (sc->link_active == 1) {
			ifp->if_baudrate = sc->link_speed = 0;
			sc->link_duplex = 0;
			sc->link_active = 0;
		}
		if (ifp->if_link_state != LINK_STATE_DOWN) {
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

void
em_stop(void *arg, int softonly)
{
	struct em_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	INIT_DEBUGOUT("em_stop: begin");

	timeout_del(&sc->timer_handle);
	timeout_del(&sc->tx_fifo_timer_handle);

	if (!softonly) {
		em_disable_intr(sc);
		em_reset_hw(&sc->hw);
	}

	em_free_transmit_structures(sc);
	em_free_receive_structures(sc);
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
void
em_identify_hardware(struct em_softc *sc)
{
	u_int32_t reg;
	struct pci_attach_args *pa = &sc->osdep.em_pa;

	/* Make sure our PCI config space has the necessary stuff set */
	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
					    PCI_COMMAND_STATUS_REG);

	/* Save off the information about this board */
	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	sc->hw.revision_id = PCI_REVISION(reg);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
	sc->hw.subsystem_id = PCI_PRODUCT(reg);

	/* Identify the MAC */
	if (em_set_mac_type(&sc->hw))
		printf("%s: Unknown MAC Type\n", sc->sc_dv.dv_xname);

	if (sc->hw.mac_type == em_pchlan)
		sc->hw.revision_id = PCI_PRODUCT(pa->pa_id) & 0x0f;

	if (sc->hw.mac_type == em_82541 ||
	    sc->hw.mac_type == em_82541_rev_2 ||
	    sc->hw.mac_type == em_82547 ||
	    sc->hw.mac_type == em_82547_rev_2)
		sc->hw.phy_init_script = TRUE;
}
1615 
1616 int
1617 em_allocate_pci_resources(struct em_softc *sc)
1618 {
1619 	int		val, rid;
1620 	pci_intr_handle_t	ih;
1621 	const char		*intrstr = NULL;
1622 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1623 	pci_chipset_tag_t	pc = pa->pa_pc;
1624 
1625 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_MMBA);
1626 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1627 		printf(": mmba is not mem space\n");
1628 		return (ENXIO);
1629 	}
1630 	if (pci_mapreg_map(pa, EM_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
1631 	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
1632 	    &sc->osdep.em_membase, &sc->osdep.em_memsize, 0)) {
1633 		printf(": cannot find mem space\n");
1634 		return (ENXIO);
1635 	}
1636 
1637 	switch (sc->hw.mac_type) {
1638 	case em_82544:
1639 	case em_82540:
1640 	case em_82545:
1641 	case em_82546:
1642 	case em_82541:
1643 	case em_82541_rev_2:
1644 		/* Figure out where our I/O BAR is ? */
1645 		for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END;) {
1646 			val = pci_conf_read(pa->pa_pc, pa->pa_tag, rid);
1647 			if (PCI_MAPREG_TYPE(val) == PCI_MAPREG_TYPE_IO) {
1648 				sc->io_rid = rid;
1649 				break;
1650 			}
1651 			rid += 4;
1652 			if (PCI_MAPREG_MEM_TYPE(val) ==
1653 			    PCI_MAPREG_MEM_TYPE_64BIT)
1654 				rid += 4;	/* skip high bits, too */
1655 		}
1656 
1657 		if (pci_mapreg_map(pa, rid, PCI_MAPREG_TYPE_IO, 0,
1658 		    &sc->osdep.io_bus_space_tag, &sc->osdep.io_bus_space_handle,
1659 		    &sc->osdep.em_iobase, &sc->osdep.em_iosize, 0)) {
1660 			printf(": cannot find i/o space\n");
1661 			return (ENXIO);
1662 		}
1663 
1664 		sc->hw.io_base = 0;
1665 		break;
1666 	default:
1667 		break;
1668 	}
1669 
1670 	/* for ICH8 and family we need to find the flash memory */
1671 	if (IS_ICH8(sc->hw.mac_type)) {
1672 		val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_FLASH);
1673 		if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1674 			printf(": flash is not mem space\n");
1675 			return (ENXIO);
1676 		}
1677 
1678 		if (pci_mapreg_map(pa, EM_FLASH, PCI_MAPREG_MEM_TYPE(val), 0,
1679 		    &sc->osdep.flash_bus_space_tag, &sc->osdep.flash_bus_space_handle,
1680 		    &sc->osdep.em_flashbase, &sc->osdep.em_flashsize, 0)) {
1681 			printf(": cannot find mem space\n");
1682 			return (ENXIO);
1683 		}
1684         }
1685 
1686 	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
1687 		printf(": couldn't map interrupt\n");
1688 		return (ENXIO);
1689 	}
1690 
1691 	sc->osdep.dev = (struct device *)sc;
1692 	sc->hw.back = &sc->osdep;
1693 
1694 	intrstr = pci_intr_string(pc, ih);
1695 	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, em_intr, sc,
1696 					      sc->sc_dv.dv_xname);
1697 	if (sc->sc_intrhand == NULL) {
1698 		printf(": couldn't establish interrupt");
1699 		if (intrstr != NULL)
1700 			printf(" at %s", intrstr);
1701 		printf("\n");
1702 		return (ENXIO);
1703 	}
1704 	printf(": %s", intrstr);
1705 
1706 	/*
1707 	 * The ICP_xxxx device has multiple, duplicate register sets for
1708 	 * use when it is being used as a network processor.  Disable those
1709 	 * registers here, as they are not necessary in this context and
1710 	 * can confuse the system.
1711 	 */
1712 	if (sc->hw.mac_type == em_icp_xxxx) {
1713 		int offset;
1714 		pcireg_t val;
1715 
1716 		if (!pci_get_capability(sc->osdep.em_pa.pa_pc,
1717 		    sc->osdep.em_pa.pa_tag, PCI_CAP_ID_ST, &offset, &val)) {
1718 			return (0);
1719 		}
1720 		offset += PCI_ST_SMIA_OFFSET;
1721 		pci_conf_write(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
1722 		    offset, 0x06);
1723 		E1000_WRITE_REG(&sc->hw, IMC1, ~0x0);
1724 		E1000_WRITE_REG(&sc->hw, IMC2, ~0x0);
1725 	}
1726 	return (0);
1727 }
1728 
1729 void
1730 em_free_pci_resources(struct em_softc *sc)
1731 {
1732 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1733 	pci_chipset_tag_t	pc = pa->pa_pc;
1734 
1735 	if (sc->sc_intrhand)
1736 		pci_intr_disestablish(pc, sc->sc_intrhand);
1737 	sc->sc_intrhand = 0;
1738 
1739 	if (sc->osdep.em_flashbase)
1740 		bus_space_unmap(sc->osdep.flash_bus_space_tag, sc->osdep.flash_bus_space_handle,
1741 				sc->osdep.em_flashsize);
1742 	sc->osdep.em_flashbase = 0;
1743 
1744 	if (sc->osdep.em_iobase)
1745 		bus_space_unmap(sc->osdep.io_bus_space_tag, sc->osdep.io_bus_space_handle,
1746 				sc->osdep.em_iosize);
1747 	sc->osdep.em_iobase = 0;
1748 
1749 	if (sc->osdep.em_membase)
1750 		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
1751 				sc->osdep.em_memsize);
1752 	sc->osdep.em_membase = 0;
1753 }
1754 
1755 /*********************************************************************
1756  *
1757  *  Initialize the hardware to a configuration as specified by the
1758  *  em_softc structure. The controller is reset, the EEPROM is
1759  *  verified, the MAC address is set, then the shared initialization
1760  *  routines are called.
1761  *
1762  **********************************************************************/
1763 int
1764 em_hardware_init(struct em_softc *sc)
1765 {
1766 	uint32_t ret_val;
1767 	u_int16_t rx_buffer_size;
1768 
1769 	INIT_DEBUGOUT("em_hardware_init: begin");
1770 	/* Issue a global reset */
1771 	em_reset_hw(&sc->hw);
1772 
1773 	/* When hardware is reset, fifo_head is also reset */
1774 	sc->tx_fifo_head = 0;
1775 
1776 	/* Make sure we have a good EEPROM before we read from it */
1777 	if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1778 		/*
1779 		 * Some PCIe parts fail the first check due to
1780 		 * the link being in sleep state; call it again,
1781 		 * and if it fails a second time it's a real issue.
1782 		 */
1783 		if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1784 			printf("%s: The EEPROM Checksum Is Not Valid\n",
1785 			       sc->sc_dv.dv_xname);
1786 			return (EIO);
1787 		}
1788 	}
1789 
1790 	if (em_read_part_num(&sc->hw, &(sc->part_num)) < 0) {
1791 		printf("%s: EEPROM read error while reading part number\n",
1792 		       sc->sc_dv.dv_xname);
1793 		return (EIO);
1794 	}
1795 
1796 	/* Set up smart power down as default off on newer adapters */
1797 	if (!em_smart_pwr_down &&
1798 	     (sc->hw.mac_type == em_82571 ||
1799 	      sc->hw.mac_type == em_82572 ||
1800 	      sc->hw.mac_type == em_82575 ||
1801 	      sc->hw.mac_type == em_82580 ||
1802 	      sc->hw.mac_type == em_i210 ||
1803 	      sc->hw.mac_type == em_i350)) {
1804 		uint16_t phy_tmp = 0;
1805 
1806 		/* Speed up time to link by disabling smart power down */
1807 		em_read_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1808 		phy_tmp &= ~IGP02E1000_PM_SPD;
1809 		em_write_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1810 	}
1811 
1812 	/*
1813 	 * These parameters control the automatic generation (Tx) and
1814 	 * response (Rx) to Ethernet PAUSE frames.
1815 	 * - High water mark should allow for at least two frames to be
1816 	 *   received after sending an XOFF.
1817 	 * - Low water mark works best when it is very near the high water mark.
1818 	 *   This allows the receiver to restart by sending XON when it has
1819 	 *   drained a bit.  Here we use an arbitrary value of 1500 which will
1820 	 *   restart after one full frame is pulled from the buffer.  There
1821 	 *   could be several smaller frames in the buffer and if so they will
1822 	 *   not trigger the XON until their total number reduces the buffer
1823 	 *   by 1500.
1824 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1825 	 */
1826 	rx_buffer_size = ((E1000_READ_REG(&sc->hw, PBA) & 0xffff) << 10);
1827 
1828 	sc->hw.fc_high_water = rx_buffer_size -
1829 	    EM_ROUNDUP(sc->hw.max_frame_size, 1024);
1830 	sc->hw.fc_low_water = sc->hw.fc_high_water - 1500;
1831 	if (sc->hw.mac_type == em_80003es2lan)
1832 		sc->hw.fc_pause_time = 0xFFFF;
1833 	else
1834 		sc->hw.fc_pause_time = 1000;
1835 	sc->hw.fc_send_xon = TRUE;
1836 	sc->hw.fc = E1000_FC_FULL;
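
	/*
	 * Worked example with hypothetical values: if the PBA register's
	 * low word reads 48, rx_buffer_size is 48 << 10 = 49152 bytes.
	 * A 1518-byte max frame rounds up to 2048, so fc_high_water is
	 * 49152 - 2048 = 47104 and fc_low_water is 47104 - 1500 = 45604,
	 * i.e. XON is sent once a full frame has drained below the XOFF
	 * threshold.
	 */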
1837 
1838 	if ((ret_val = em_init_hw(&sc->hw)) != 0) {
1839 		if (ret_val == E1000_DEFER_INIT) {
1840 			INIT_DEBUGOUT("\nHardware Initialization Deferred ");
1841 			return (EAGAIN);
1842 		}
1843 		printf("%s: Hardware Initialization Failed\n",
1844 		       sc->sc_dv.dv_xname);
1845 		return (EIO);
1846 	}
1847 
1848 	em_check_for_link(&sc->hw);
1849 
1850 	return (0);
1851 }
1852 
1853 /*********************************************************************
1854  *
1855  *  Setup networking device structure and register an interface.
1856  *
1857  **********************************************************************/
1858 void
1859 em_setup_interface(struct em_softc *sc)
1860 {
1861 	struct ifnet   *ifp;
1862 	u_char fiber_type = IFM_1000_SX;
1863 
1864 	INIT_DEBUGOUT("em_setup_interface: begin");
1865 
1866 	ifp = &sc->interface_data.ac_if;
1867 	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
1868 	ifp->if_softc = sc;
1869 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1870 	ifp->if_ioctl = em_ioctl;
1871 	ifp->if_start = em_start;
1872 	ifp->if_watchdog = em_watchdog;
1873 	ifp->if_hardmtu =
1874 		sc->hw.max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN;
1875 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1876 	IFQ_SET_READY(&ifp->if_snd);
1877 
1878 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1879 
1880 #if NVLAN > 0
1881 	if (sc->hw.mac_type != em_82575 && sc->hw.mac_type != em_82580 &&
1882 	    sc->hw.mac_type != em_i210 && sc->hw.mac_type != em_i350)
1883 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1884 #endif
1885 
1886 	if (sc->hw.mac_type >= em_82543 && sc->hw.mac_type != em_82575 &&
1887 	    sc->hw.mac_type != em_82580 && sc->hw.mac_type != em_i210 &&
1888 	    sc->hw.mac_type != em_i350)
1889 		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1890 
1891 	/*
1892 	 * Specify the media types supported by this adapter and register
1893 	 * callbacks to update media and link information
1894 	 */
1895 	ifmedia_init(&sc->media, IFM_IMASK, em_media_change,
1896 		     em_media_status);
1897 	if (sc->hw.media_type == em_media_type_fiber ||
1898 	    sc->hw.media_type == em_media_type_internal_serdes) {
1899 		if (sc->hw.mac_type == em_82545)
1900 			fiber_type = IFM_1000_LX;
1901 		ifmedia_add(&sc->media, IFM_ETHER | fiber_type | IFM_FDX,
1902 			    0, NULL);
1903 		ifmedia_add(&sc->media, IFM_ETHER | fiber_type,
1904 			    0, NULL);
1905 	} else {
1906 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1907 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1908 			    0, NULL);
1909 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX,
1910 			    0, NULL);
1911 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1912 			    0, NULL);
1913 		if (sc->hw.phy_type != em_phy_ife) {
1914 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1915 				    0, NULL);
1916 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1917 		}
1918 	}
1919 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1920 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1921 
1922 	if_attach(ifp);
1923 	ether_ifattach(ifp);
1924 }
1925 
1926 int
1927 em_detach(struct device *self, int flags)
1928 {
1929 	struct em_softc *sc = (struct em_softc *)self;
1930 	struct ifnet *ifp = &sc->interface_data.ac_if;
1931 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1932 	pci_chipset_tag_t	pc = pa->pa_pc;
1933 
1934 	if (sc->sc_intrhand)
1935 		pci_intr_disestablish(pc, sc->sc_intrhand);
1936 	sc->sc_intrhand = 0;
1937 
1938 	em_stop(sc, 1);
1939 
1940 	em_free_pci_resources(sc);
1941 	em_dma_free(sc, &sc->rxdma);
1942 	em_dma_free(sc, &sc->txdma);
1943 
1944 	ether_ifdetach(ifp);
1945 	if_detach(ifp);
1946 
1947 	return (0);
1948 }
1949 
1950 int
1951 em_activate(struct device *self, int act)
1952 {
1953 	struct em_softc *sc = (struct em_softc *)self;
1954 	struct ifnet *ifp = &sc->interface_data.ac_if;
1955 	int rv = 0;
1956 
1957 	switch (act) {
1958 	case DVACT_SUSPEND:
1959 		if (ifp->if_flags & IFF_RUNNING)
1960 			em_stop(sc, 0);
1961 		/* We have no children at the moment, but we will soon */
1962 		rv = config_activate_children(self, act);
1963 		break;
1964 	case DVACT_RESUME:
1965 		if (ifp->if_flags & IFF_UP)
1966 			em_init(sc);
1967 		break;
1968 	default:
1969 		rv = config_activate_children(self, act);
1970 		break;
1971 	}
1972 	return (rv);
1973 }
1974 
1975 /*********************************************************************
1976  *
1977  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1978  *
1979  **********************************************************************/
1980 void
1981 em_smartspeed(struct em_softc *sc)
1982 {
1983 	uint16_t phy_tmp;
1984 
1985 	if (sc->link_active || (sc->hw.phy_type != em_phy_igp) ||
1986 	    !sc->hw.autoneg || !(sc->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1987 		return;
1988 
1989 	if (sc->smartspeed == 0) {
1990 		/* If the Master/Slave config fault is asserted in two
1991 		 * consecutive reads, assume a genuine fault */
1992 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1993 		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1994 			return;
1995 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1996 		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1997 			em_read_phy_reg(&sc->hw, PHY_1000T_CTRL,
1998 					&phy_tmp);
1999 			if (phy_tmp & CR_1000T_MS_ENABLE) {
2000 				phy_tmp &= ~CR_1000T_MS_ENABLE;
2001 				em_write_phy_reg(&sc->hw,
2002 						    PHY_1000T_CTRL, phy_tmp);
2003 				sc->smartspeed++;
2004 				if (sc->hw.autoneg &&
2005 				    !em_phy_setup_autoneg(&sc->hw) &&
2006 				    !em_read_phy_reg(&sc->hw, PHY_CTRL,
2007 						       &phy_tmp)) {
2008 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
2009 						    MII_CR_RESTART_AUTO_NEG);
2010 					em_write_phy_reg(&sc->hw,
2011 							 PHY_CTRL, phy_tmp);
2012 				}
2013 			}
2014 		}
2015 		return;
2016 	} else if (sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2017 		/* If still no link, perhaps using 2/3 pair cable */
2018 		em_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
2019 		phy_tmp |= CR_1000T_MS_ENABLE;
2020 		em_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
2021 		if (sc->hw.autoneg &&
2022 		    !em_phy_setup_autoneg(&sc->hw) &&
2023 		    !em_read_phy_reg(&sc->hw, PHY_CTRL, &phy_tmp)) {
2024 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
2025 				    MII_CR_RESTART_AUTO_NEG);
2026 			em_write_phy_reg(&sc->hw, PHY_CTRL, phy_tmp);
2027 		}
2028 	}
2029 	/* Restart process after EM_SMARTSPEED_MAX iterations */
2030 	if (sc->smartspeed++ == EM_SMARTSPEED_MAX)
2031 		sc->smartspeed = 0;
2032 }
2033 
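
/*
 * Recovery flow of em_smartspeed(), summarized (the pass counts follow
 * the EM_SMARTSPEED_* constants): on the first pass, a Master/Slave
 * config fault seen in two consecutive status reads clears
 * CR_1000T_MS_ENABLE and restarts autonegotiation; if there is still
 * no link after EM_SMARTSPEED_DOWNSHIFT passes, manual Master/Slave is
 * re-enabled (to cope with 2/3-pair cabling); after EM_SMARTSPEED_MAX
 * passes the counter wraps to 0 and the cycle may begin again.
 */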
2034 /*
2035  * Manage DMA'able memory.
2036  */
2037 int
2038 em_dma_malloc(struct em_softc *sc, bus_size_t size,
2039     struct em_dma_alloc *dma, int mapflags)
2040 {
2041 	int r;
2042 
2043 	dma->dma_tag = sc->osdep.em_pa.pa_dmat;
2044 	r = bus_dmamap_create(dma->dma_tag, size, 1,
2045 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
2046 	if (r != 0) {
2047 		printf("%s: em_dma_malloc: bus_dmamap_create failed; "
2048 			"error %u\n", sc->sc_dv.dv_xname, r);
2049 		goto fail_0;
2050 	}
2051 
2052 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
2053 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
2054 	if (r != 0) {
2055 		printf("%s: em_dma_malloc: bus_dmamem_alloc failed; "
2056 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
2057 			(unsigned long)size, r);
2058 		goto fail_1;
2059 	}
2060 
2061 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
2062 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
2063 	if (r != 0) {
2064 		printf("%s: em_dma_malloc: bus_dmamem_map failed; "
2065 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
2066 			(unsigned long)size, r);
2067 		goto fail_2;
2068 	}
2069 
2070 	r = bus_dmamap_load(sc->osdep.em_pa.pa_dmat, dma->dma_map,
2071 			    dma->dma_vaddr, size, NULL,
2072 			    mapflags | BUS_DMA_NOWAIT);
2073 	if (r != 0) {
2074 		printf("%s: em_dma_malloc: bus_dmamap_load failed; "
2075 			"error %u\n", sc->sc_dv.dv_xname, r);
2076 		goto fail_3;
2077 	}
2078 
2079 	dma->dma_size = size;
2080 	return (0);
2081 
2082 fail_3:
2083 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
2084 fail_2:
2085 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2086 fail_1:
2087 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2088 fail_0:
2089 	dma->dma_map = NULL;
2090 	dma->dma_tag = NULL;
2091 
2092 	return (r);
2093 }
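
/*
 * Usage sketch (hypothetical call; the actual attach-time sizes live
 * elsewhere in the driver): the create/alloc/map/load sequence above
 * either succeeds as a whole or is unwound in reverse via the fail_*
 * labels, so callers only need to check the return value:
 *
 *	if (em_dma_malloc(sc, sc->num_tx_desc * sizeof(struct em_tx_desc),
 *	    &sc->txdma, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	sc->tx_desc_base = (struct em_tx_desc *)sc->txdma.dma_vaddr;
 */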
2094 
2095 void
2096 em_dma_free(struct em_softc *sc, struct em_dma_alloc *dma)
2097 {
2098 	if (dma->dma_tag == NULL)
2099 		return;
2100 
2101 	if (dma->dma_map != NULL) {
2102 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
2103 		    dma->dma_map->dm_mapsize,
2104 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2105 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2106 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
2107 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2108 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2109 	}
2110 	dma->dma_tag = NULL;
2111 }
2112 
2113 /*********************************************************************
2114  *
2115  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2116  *  the information needed to transmit a packet on the wire.
2117  *
2118  **********************************************************************/
2119 int
2120 em_allocate_transmit_structures(struct em_softc *sc)
2121 {
2122 	if (!(sc->tx_buffer_area = mallocarray(sc->num_tx_desc,
2123 	    sizeof(struct em_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2124 		printf("%s: Unable to allocate tx_buffer memory\n",
2125 		       sc->sc_dv.dv_xname);
2126 		return (ENOMEM);
2127 	}
2128 
2129 	return (0);
2130 }
2131 
2132 /*********************************************************************
2133  *
2134  *  Allocate and initialize transmit structures.
2135  *
2136  **********************************************************************/
2137 int
2138 em_setup_transmit_structures(struct em_softc *sc)
2139 {
2140 	struct  em_buffer *tx_buffer;
2141 	int error, i;
2142 
2143 	if ((error = em_allocate_transmit_structures(sc)) != 0)
2144 		goto fail;
2145 
2146 	bzero((void *) sc->tx_desc_base,
2147 	      (sizeof(struct em_tx_desc)) * sc->num_tx_desc);
2148 
2149 	sc->txtag = sc->osdep.em_pa.pa_dmat;
2150 
2151 	tx_buffer = sc->tx_buffer_area;
2152 	for (i = 0; i < sc->num_tx_desc; i++) {
2153 		error = bus_dmamap_create(sc->txtag, MAX_JUMBO_FRAME_SIZE,
2154 			    EM_MAX_SCATTER, MAX_JUMBO_FRAME_SIZE, 0,
2155 			    BUS_DMA_NOWAIT, &tx_buffer->map);
2156 		if (error != 0) {
2157 			printf("%s: Unable to create TX DMA map\n",
2158 			    sc->sc_dv.dv_xname);
2159 			goto fail;
2160 		}
2161 		tx_buffer++;
2162 	}
2163 
2164 	sc->next_avail_tx_desc = 0;
2165 	sc->next_tx_to_clean = 0;
2166 
2167 	/* Set number of descriptors available */
2168 	sc->num_tx_desc_avail = sc->num_tx_desc;
2169 
2170 	/* Set checksum context */
2171 	sc->active_checksum_context = OFFLOAD_NONE;
2172 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2173 	    sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2174 
2175 	return (0);
2176 
2177 fail:
2178 	em_free_transmit_structures(sc);
2179 	return (error);
2180 }
2181 
2182 /*********************************************************************
2183  *
2184  *  Enable transmit unit.
2185  *
2186  **********************************************************************/
2187 void
2188 em_initialize_transmit_unit(struct em_softc *sc)
2189 {
2190 	u_int32_t	reg_tctl, reg_tipg = 0;
2191 	u_int64_t	bus_addr;
2192 
2193 	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2194 
2195 	/* Setup the Base and Length of the Tx Descriptor Ring */
2196 	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
2197 	E1000_WRITE_REG(&sc->hw, TDLEN,
2198 			sc->num_tx_desc *
2199 			sizeof(struct em_tx_desc));
2200 	E1000_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
2201 	E1000_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
2202 
2203 	/* Setup the HW Tx Head and Tail descriptor pointers */
2204 	E1000_WRITE_REG(&sc->hw, TDT, 0);
2205 	E1000_WRITE_REG(&sc->hw, TDH, 0);
2206 
2207 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2208 		     E1000_READ_REG(&sc->hw, TDBAL),
2209 		     E1000_READ_REG(&sc->hw, TDLEN));
2210 
2211 	/* Set the default values for the Tx Inter Packet Gap timer */
2212 	switch (sc->hw.mac_type) {
2213 	case em_82542_rev2_0:
2214 	case em_82542_rev2_1:
2215 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
2216 		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2217 		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2218 		break;
2219 	case em_80003es2lan:
2220 		reg_tipg = DEFAULT_82543_TIPG_IPGR1;
2221 		reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2222 		break;
2223 	default:
2224 		if (sc->hw.media_type == em_media_type_fiber ||
2225 		    sc->hw.media_type == em_media_type_internal_serdes)
2226 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2227 		else
2228 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2229 		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2230 		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2231 	}
2232 
2234 	E1000_WRITE_REG(&sc->hw, TIPG, reg_tipg);
2235 	E1000_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
2236 	if (sc->hw.mac_type >= em_82540)
2237 		E1000_WRITE_REG(&sc->hw, TADV, sc->tx_abs_int_delay);
2238 
2239 	/* Setup Transmit Descriptor Base Settings */
2240 	sc->txd_cmd = E1000_TXD_CMD_IFCS;
2241 
2242 	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2243 	    sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
2244 		/* 82575/6 need to enable the TX queue and lack the IDE bit */
2245 		reg_tctl = E1000_READ_REG(&sc->hw, TXDCTL);
2246 		reg_tctl |= E1000_TXDCTL_QUEUE_ENABLE;
2247 		E1000_WRITE_REG(&sc->hw, TXDCTL, reg_tctl);
2248 	} else if (sc->tx_int_delay > 0)
2249 		sc->txd_cmd |= E1000_TXD_CMD_IDE;
2250 
2251 	/* Program the Transmit Control Register */
2252 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2253 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2254 	if (sc->hw.mac_type >= em_82571)
2255 		reg_tctl |= E1000_TCTL_MULR;
2256 	if (sc->link_duplex == FULL_DUPLEX)
2257 		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2258 	else
2259 		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2260 	/* This write will effectively turn on the transmit unit */
2261 	E1000_WRITE_REG(&sc->hw, TCTL, reg_tctl);
2262 }
2263 
2264 /*********************************************************************
2265  *
2266  *  Free all transmit related data structures.
2267  *
2268  **********************************************************************/
2269 void
2270 em_free_transmit_structures(struct em_softc *sc)
2271 {
2272 	struct em_buffer   *tx_buffer;
2273 	int		i;
2274 
2275 	INIT_DEBUGOUT("free_transmit_structures: begin");
2276 
2277 	if (sc->tx_buffer_area != NULL) {
2278 		tx_buffer = sc->tx_buffer_area;
2279 		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2280 			if (tx_buffer->map != NULL &&
2281 			    tx_buffer->map->dm_nsegs > 0) {
2282 				bus_dmamap_sync(sc->txtag, tx_buffer->map,
2283 				    0, tx_buffer->map->dm_mapsize,
2284 				    BUS_DMASYNC_POSTWRITE);
2285 				bus_dmamap_unload(sc->txtag,
2286 				    tx_buffer->map);
2287 			}
2288 			if (tx_buffer->m_head != NULL) {
2289 				m_freem(tx_buffer->m_head);
2290 				tx_buffer->m_head = NULL;
2291 			}
2292 			if (tx_buffer->map != NULL) {
2293 				bus_dmamap_destroy(sc->txtag,
2294 				    tx_buffer->map);
2295 				tx_buffer->map = NULL;
2296 			}
2297 		}
2298 	}
2299 	if (sc->tx_buffer_area != NULL) {
2300 		free(sc->tx_buffer_area, M_DEVBUF, 0);
2301 		sc->tx_buffer_area = NULL;
2302 	}
2303 	if (sc->txtag != NULL)
2304 		sc->txtag = NULL;
2305 }
2306 
2307 /*********************************************************************
2308  *
2309  *  The offload context needs to be set when we transfer the first
2310  *  packet of a particular protocol (TCP/UDP). We change the
2311  *  context only if the protocol type changes.
2312  *
2313  **********************************************************************/
2314 void
2315 em_transmit_checksum_setup(struct em_softc *sc, struct mbuf *mp,
2316     u_int32_t *txd_upper, u_int32_t *txd_lower)
2317 {
2318 	struct em_context_desc *TXD;
2319 	struct em_buffer *tx_buffer;
2320 	int curr_txd;
2321 
2322 	if (mp->m_pkthdr.csum_flags) {
2323 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
2324 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2325 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2326 			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
2327 				return;
2328 			else
2329 				sc->active_checksum_context = OFFLOAD_TCP_IP;
2330 		} else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
2331 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2332 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2333 			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
2334 				return;
2335 			else
2336 				sc->active_checksum_context = OFFLOAD_UDP_IP;
2337 		} else {
2338 			*txd_upper = 0;
2339 			*txd_lower = 0;
2340 			return;
2341 		}
2342 	} else {
2343 		*txd_upper = 0;
2344 		*txd_lower = 0;
2345 		return;
2346 	}
2347 
2348 	/* If we reach this point, the checksum offload context
2349 	 * needs to be reset.
2350 	 */
2351 	curr_txd = sc->next_avail_tx_desc;
2352 	tx_buffer = &sc->tx_buffer_area[curr_txd];
2353 	TXD = (struct em_context_desc *) &sc->tx_desc_base[curr_txd];
2354 
2355 	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2356 	TXD->lower_setup.ip_fields.ipcso =
2357 	    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2358 	TXD->lower_setup.ip_fields.ipcse =
2359 	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2360 
2361 	TXD->upper_setup.tcp_fields.tucss =
2362 	    ETHER_HDR_LEN + sizeof(struct ip);
2363 	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2364 
2365 	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
2366 		TXD->upper_setup.tcp_fields.tucso =
2367 		    ETHER_HDR_LEN + sizeof(struct ip) +
2368 		    offsetof(struct tcphdr, th_sum);
2369 	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
2370 		TXD->upper_setup.tcp_fields.tucso =
2371 		    ETHER_HDR_LEN + sizeof(struct ip) +
2372 		    offsetof(struct udphdr, uh_sum);
2373 	}
2374 
2375 	TXD->tcp_seg_setup.data = htole32(0);
2376 	TXD->cmd_and_length = htole32(sc->txd_cmd | E1000_TXD_CMD_DEXT);
2377 
2378 	tx_buffer->m_head = NULL;
2379 	tx_buffer->next_eop = -1;
2380 
2381 	if (++curr_txd == sc->num_tx_desc)
2382 		curr_txd = 0;
2383 
2384 	sc->num_tx_desc_avail--;
2385 	sc->next_avail_tx_desc = curr_txd;
2386 }
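
/*
 * Worked example for the context descriptor set up above, assuming a
 * plain IPv4/TCP frame with no VLAN tag and no IP options (ETHER_HDR_LEN
 * is 14, struct ip is 20 bytes): ipcss = 14, ipcso = 14 + 10 = 24 (the
 * ip_sum offset), ipcse = 33 (last byte of the IP header), tucss = 34,
 * and tucso = 34 + 16 = 50 (the th_sum offset); tucse = 0 tells the
 * hardware to checksum through the end of the frame.
 */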
2387 
2388 /**********************************************************************
2389  *
2390  *  Examine each tx_buffer in the used queue. If the hardware is done
2391  *  processing the packet then free associated resources. The
2392  *  tx_buffer is put back on the free queue.
2393  *
2394  **********************************************************************/
2395 void
2396 em_txeof(struct em_softc *sc)
2397 {
2398 	int first, last, done, num_avail;
2399 	struct em_buffer *tx_buffer;
2400 	struct em_tx_desc   *tx_desc, *eop_desc;
2401 	struct ifnet   *ifp = &sc->interface_data.ac_if;
2402 
2403 	if (sc->num_tx_desc_avail == sc->num_tx_desc)
2404 		return;
2405 
2406 	num_avail = sc->num_tx_desc_avail;
2407 	first = sc->next_tx_to_clean;
2408 	tx_desc = &sc->tx_desc_base[first];
2409 	tx_buffer = &sc->tx_buffer_area[first];
2410 	last = tx_buffer->next_eop;
2411 	eop_desc = &sc->tx_desc_base[last];
2412 
2413 	/*
2414 	 * What this does is get the index of the
2415 	 * first descriptor AFTER the EOP of the
2416 	 * first packet, so that we can do a
2417 	 * simple comparison in the inner while loop.
2418 	 */
2419 	if (++last == sc->num_tx_desc)
2420 		last = 0;
2421 	done = last;
2422 
2423 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2424 	    sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2425 	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2426 		/* We clean the range of the packet */
2427 		while (first != done) {
2428 			tx_desc->upper.data = 0;
2429 			tx_desc->lower.data = 0;
2430 			num_avail++;
2431 
2432 			if (tx_buffer->m_head != NULL) {
2433 				ifp->if_opackets++;
2434 				if (tx_buffer->map->dm_nsegs > 0) {
2435 					bus_dmamap_sync(sc->txtag,
2436 					    tx_buffer->map, 0,
2437 					    tx_buffer->map->dm_mapsize,
2438 					    BUS_DMASYNC_POSTWRITE);
2439 					bus_dmamap_unload(sc->txtag,
2440 					    tx_buffer->map);
2441 				}
2442 				m_freem(tx_buffer->m_head);
2443 				tx_buffer->m_head = NULL;
2444 			}
2445 			tx_buffer->next_eop = -1;
2446 
2447 			if (++first == sc->num_tx_desc)
2448 				first = 0;
2449 
2450 			tx_buffer = &sc->tx_buffer_area[first];
2451 			tx_desc = &sc->tx_desc_base[first];
2452 		}
2453 		/* See if we can continue to the next packet */
2454 		last = tx_buffer->next_eop;
2455 		if (last != -1) {
2456 			eop_desc = &sc->tx_desc_base[last];
2457 			/* Get new done point */
2458 			if (++last == sc->num_tx_desc)
2459 				last = 0;
2460 			done = last;
2461 		} else
2462 			break;
2463 	}
2464 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2465 	    sc->txdma.dma_map->dm_mapsize,
2466 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2467 
2468 	sc->next_tx_to_clean = first;
2469 
2470 	/*
2471 	 * If we have enough room, clear IFF_OACTIVE to tell the stack
2472 	 * that it is OK to send packets.
2473 	 * If there are no pending descriptors, clear the timeout. Otherwise,
2474 	 * if some descriptors have been freed, restart the timeout.
2475 	 */
2476 	if (num_avail > EM_TX_CLEANUP_THRESHOLD)
2477 		ifp->if_flags &= ~IFF_OACTIVE;
2478 
2479 	/* All clean, turn off the timer */
2480 	if (num_avail == sc->num_tx_desc)
2481 		ifp->if_timer = 0;
2482 	/* Some cleaned, reset the timer */
2483 	else if (num_avail != sc->num_tx_desc_avail)
2484 		ifp->if_timer = EM_TX_TIMEOUT;
2485 
2486 	sc->num_tx_desc_avail = num_avail;
2487 }
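
/*
 * Example of the cleanup walk above, with a hypothetical ring state:
 * if a three-descriptor packet occupies slots 5-7 and its EOP
 * descriptor (slot 7) has E1000_TXD_STAT_DD set, "first" starts at 5
 * and "done" becomes 8, so the inner loop reclaims slots 5, 6 and 7
 * before checking whether the packet beginning at slot 8 has also
 * completed.
 */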
2488 
2489 /*********************************************************************
2490  *
2491  *  Get a buffer from system mbuf buffer pool.
2492  *
2493  **********************************************************************/
2494 int
2495 em_get_buf(struct em_softc *sc, int i)
2496 {
2497 	struct mbuf    *m;
2498 	struct em_buffer *pkt;
2499 	struct em_rx_desc *desc;
2500 	int error;
2501 
2502 	pkt = &sc->rx_buffer_area[i];
2503 	desc = &sc->rx_desc_base[i];
2504 
2505 	if (pkt->m_head != NULL) {
2506 		printf("%s: em_get_buf: slot %d already has an mbuf\n",
2507 		    sc->sc_dv.dv_xname, i);
2508 		return (ENOBUFS);
2509 	}
2510 
2511 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
2512 	if (!m) {
2513 		sc->mbuf_cluster_failed++;
2514 		return (ENOBUFS);
2515 	}
2516 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2517 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2518 		m_adj(m, ETHER_ALIGN);
2519 
2520 	error = bus_dmamap_load_mbuf(sc->rxtag, pkt->map, m, BUS_DMA_NOWAIT);
2521 	if (error) {
2522 		m_freem(m);
2523 		return (error);
2524 	}
2525 
2526 	bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
2527 	    BUS_DMASYNC_PREREAD);
2528 	pkt->m_head = m;
2529 
2530 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2531 	    sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_POSTWRITE);
2532 
2533 	bzero(desc, sizeof(*desc));
2534 	desc->buffer_addr = htole64(pkt->map->dm_segs[0].ds_addr);
2535 
2536 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2537 	    sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_PREWRITE);
2538 
2539 	return (0);
2540 }
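
/*
 * Note on the m_adj() above: ETHER_ALIGN is 2, so for standard-size
 * frames the buffer start is shifted two bytes and the 14-byte
 * Ethernet header then leaves the IP header 4-byte aligned.  Jumbo
 * frames need the full cluster, so the shift is skipped and
 * em_realign() repairs the alignment on __STRICT_ALIGNMENT machines.
 */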
2541 
2542 /*********************************************************************
2543  *
2544  *  Allocate memory for rx_buffer structures. Since we use one
2545 	 *  rx_buffer per received packet, the maximum number of rx_buffers
2546  *  that we'll need is equal to the number of receive descriptors
2547  *  that we've allocated.
2548  *
2549  **********************************************************************/
2550 int
2551 em_allocate_receive_structures(struct em_softc *sc)
2552 {
2553 	int		i, error;
2554 	struct em_buffer *rx_buffer;
2555 
2556 	if (!(sc->rx_buffer_area = mallocarray(sc->num_rx_desc,
2557 	    sizeof(struct em_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2558 		printf("%s: Unable to allocate rx_buffer memory\n",
2559 		       sc->sc_dv.dv_xname);
2560 		return (ENOMEM);
2561 	}
2562 
2563 	sc->rxtag = sc->osdep.em_pa.pa_dmat;
2564 
2565 	rx_buffer = sc->rx_buffer_area;
2566 	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2567 		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
2568 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rx_buffer->map);
2569 		if (error != 0) {
2570 			printf("%s: em_allocate_receive_structures: "
2571 			    "bus_dmamap_create failed; error %u\n",
2572 			    sc->sc_dv.dv_xname, error);
2573 			goto fail;
2574 		}
2575 		rx_buffer->m_head = NULL;
2576 	}
2577 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
2578 	    sc->rxdma.dma_map->dm_mapsize,
2579 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2580 
2581 	return (0);
2582 
2583 fail:
2584 	em_free_receive_structures(sc);
2585 	return (error);
2586 }
2587 
2588 /*********************************************************************
2589  *
2590  *  Allocate and initialize receive structures.
2591  *
2592  **********************************************************************/
2593 int
2594 em_setup_receive_structures(struct em_softc *sc)
2595 {
2596 	struct ifnet *ifp = &sc->interface_data.ac_if;
2597 
2598 	memset(sc->rx_desc_base, 0,
2599 	    sizeof(struct em_rx_desc) * sc->num_rx_desc);
2600 
2601 	if (em_allocate_receive_structures(sc))
2602 		return (ENOMEM);
2603 
2604 	/* Setup our descriptor pointers */
2605 	sc->next_rx_desc_to_check = 0;
2606 	sc->last_rx_desc_filled = sc->num_rx_desc - 1;
2607 
2608 	if_rxr_init(&sc->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2609 	    sc->num_rx_desc);
2610 
2611 	if (em_rxfill(sc) == 0) {
2612 		printf("%s: unable to fill any rx descriptors\n",
2613 		    sc->sc_dv.dv_xname);
2614 	}
2615 
2616 	return (0);
2617 }
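
/*
 * Worked example for the if_rxr_init() low watermark above: with a
 * 1500-byte hardmtu and 2048-byte clusters, 2 * ((1500 / 2048) + 1)
 * = 2, so the ring may shrink to two descriptors under mbuf pressure;
 * a hypothetical 9000-byte hardmtu would give 2 * (4 + 1) = 10.
 */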
2618 
2619 /*********************************************************************
2620  *
2621  *  Enable receive unit.
2622  *
2623  **********************************************************************/
2624 void
2625 em_initialize_receive_unit(struct em_softc *sc)
2626 {
2627 	u_int32_t	reg_rctl;
2628 	u_int32_t	reg_rxcsum;
2629 	u_int64_t	bus_addr;
2630 
2631 	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2632 
2633 	/* Make sure receives are disabled while setting up the descriptor ring */
2634 	E1000_WRITE_REG(&sc->hw, RCTL, 0);
2635 
2636 	/* Set the Receive Delay Timer Register */
2637 	E1000_WRITE_REG(&sc->hw, RDTR,
2638 			sc->rx_int_delay | E1000_RDT_FPDB);
2639 
2640 	if (sc->hw.mac_type >= em_82540) {
2641 		if (sc->rx_int_delay)
2642 			E1000_WRITE_REG(&sc->hw, RADV, sc->rx_abs_int_delay);
2643 
2644 		/* Set the interrupt throttling rate.  Value is calculated
2645 		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
2646 		E1000_WRITE_REG(&sc->hw, ITR, DEFAULT_ITR);
2647 	}
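
	/*
	 * Worked example for the ITR formula above, assuming
	 * MAX_INTS_PER_SEC is 8000: the target interval is 1/8000 s =
	 * 125 us, and 125000 ns divided by the 256 ns ITR granularity
	 * gives a DEFAULT_ITR of about 488 (0x1E8).
	 */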
2648 
2649 	/* Setup the Base and Length of the Rx Descriptor Ring */
2650 	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
2651 	E1000_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
2652 			sizeof(struct em_rx_desc));
2653 	E1000_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2654 	E1000_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
2655 
2656 	/* Setup the Receive Control Register */
2657 	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2658 	    E1000_RCTL_RDMTS_HALF |
2659 	    (sc->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2660 
2661 	if (sc->hw.tbi_compatibility_on == TRUE)
2662 		reg_rctl |= E1000_RCTL_SBP;
2663 
2664 	/*
2665 	 * The i210 and i350 have a bug where they always strip the CRC
2666 	 * whether asked to or not, so ask for stripped CRC here and
2667 	 * cope in rxeof.
2668 	 */
2669 	if (sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350)
2670 		reg_rctl |= E1000_RCTL_SECRC;
2671 
2672 	switch (sc->rx_buffer_len) {
2673 	default:
2674 	case EM_RXBUFFER_2048:
2675 		reg_rctl |= E1000_RCTL_SZ_2048;
2676 		break;
2677 	case EM_RXBUFFER_4096:
2678 		reg_rctl |= E1000_RCTL_SZ_4096|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2679 		break;
2680 	case EM_RXBUFFER_8192:
2681 		reg_rctl |= E1000_RCTL_SZ_8192|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2682 		break;
2683 	case EM_RXBUFFER_16384:
2684 		reg_rctl |= E1000_RCTL_SZ_16384|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2685 		break;
2686 	}
2687 
2688 	if (sc->hw.max_frame_size != ETHER_MAX_LEN)
2689 		reg_rctl |= E1000_RCTL_LPE;
2690 
2691 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2692 	if (sc->hw.mac_type >= em_82543) {
2693 		reg_rxcsum = E1000_READ_REG(&sc->hw, RXCSUM);
2694 		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2695 		E1000_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
2696 	}
2697 
2698 	/*
2699 	 * XXX TEMPORARY WORKAROUND: on some systems with the 82573,
2700 	 * such as the Lenovo X60, long latencies are observed.
2701 	 */
2702 	if (sc->hw.mac_type == em_82573)
2703 		E1000_WRITE_REG(&sc->hw, RDTR, 0x20);
2704 
2705 	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2706 	    sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
2707 		/* 82575/6 need to enable the RX queue */
2708 		uint32_t reg;
2709 		reg = E1000_READ_REG(&sc->hw, RXDCTL);
2710 		reg |= E1000_RXDCTL_QUEUE_ENABLE;
2711 		E1000_WRITE_REG(&sc->hw, RXDCTL, reg);
2712 	}
2713 
2714 	/* Enable Receives */
2715 	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
2716 
2717 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2718 	E1000_WRITE_REG(&sc->hw, RDH, 0);
2719 	E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
2720 }
2721 
2722 /*********************************************************************
2723  *
2724  *  Free receive related data structures.
2725  *
2726  **********************************************************************/
2727 void
2728 em_free_receive_structures(struct em_softc *sc)
2729 {
2730 	struct em_buffer   *rx_buffer;
2731 	int		i;
2732 
2733 	INIT_DEBUGOUT("free_receive_structures: begin");
2734 
2735 	if (sc->rx_buffer_area != NULL) {
2736 		rx_buffer = sc->rx_buffer_area;
2737 		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2738 			if (rx_buffer->m_head != NULL) {
2739 				bus_dmamap_sync(sc->rxtag, rx_buffer->map,
2740 				    0, rx_buffer->map->dm_mapsize,
2741 				    BUS_DMASYNC_POSTREAD);
2742 				bus_dmamap_unload(sc->rxtag, rx_buffer->map);
2743 				m_freem(rx_buffer->m_head);
2744 				rx_buffer->m_head = NULL;
2745 			}
2746 			bus_dmamap_destroy(sc->rxtag, rx_buffer->map);
2747 		}
2748 	}
2749 	if (sc->rx_buffer_area != NULL) {
2750 		free(sc->rx_buffer_area, M_DEVBUF, 0);
2751 		sc->rx_buffer_area = NULL;
2752 	}
2753 	if (sc->rxtag != NULL)
2754 		sc->rxtag = NULL;
2755 
2756 	if (sc->fmp != NULL) {
2757 		m_freem(sc->fmp);
2758 		sc->fmp = NULL;
2759 		sc->lmp = NULL;
2760 	}
2761 }
2762 
2763 #ifdef __STRICT_ALIGNMENT
2764 void
2765 em_realign(struct em_softc *sc, struct mbuf *m, u_int16_t *prev_len_adj)
2766 {
2767 	unsigned char tmp_align_buf[ETHER_ALIGN];
2768 	int tmp_align_buf_len = 0;
2769 
2770 	/*
2771 	 * The Ethernet payload is not 32-bit aligned when
2772 	 * Jumbo packets are enabled, so on architectures with
2773 	 * strict alignment we need to shift the entire packet
2774 	 * ETHER_ALIGN bytes. Ugh.
2775 	 */
2776 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2777 		return;
2778 
2779 	if (*prev_len_adj > sc->align_buf_len)
2780 		*prev_len_adj -= sc->align_buf_len;
2781 	else
2782 		*prev_len_adj = 0;
2783 
2784 	if (m->m_len > (MCLBYTES - ETHER_ALIGN)) {
2785 		bcopy(m->m_data + (MCLBYTES - ETHER_ALIGN),
2786 		    &tmp_align_buf, ETHER_ALIGN);
2787 		tmp_align_buf_len = m->m_len -
2788 		    (MCLBYTES - ETHER_ALIGN);
2789 		m->m_len -= ETHER_ALIGN;
2790 	}
2791 
2792 	if (m->m_len) {
2793 		bcopy(m->m_data, m->m_data + ETHER_ALIGN, m->m_len);
2794 		if (!sc->align_buf_len)
2795 			m->m_data += ETHER_ALIGN;
2796 	}
2797 
2798 	if (sc->align_buf_len) {
2799 		m->m_len += sc->align_buf_len;
2800 		bcopy(&sc->align_buf, m->m_data, sc->align_buf_len);
2801 	}
2802 
2803 	if (tmp_align_buf_len)
2804 		bcopy(&tmp_align_buf, &sc->align_buf, tmp_align_buf_len);
2805 
2806 	sc->align_buf_len = tmp_align_buf_len;
2807 }
2808 #endif /* __STRICT_ALIGNMENT */
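
/*
 * Worked example of the shuffle in em_realign(), for a hypothetical
 * jumbo receive on a strict-alignment machine: a full 2048-byte
 * cluster has no headroom, so its last ETHER_ALIGN (2) bytes are saved
 * in tmp_align_buf, the remaining data is slid up two bytes, and the
 * two bytes carried over from the previous cluster (sc->align_buf)
 * are prepended; the newly saved tail is then carried forward for the
 * next cluster in the chain.
 */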
2809 
2810 int
2811 em_rxfill(struct em_softc *sc)
2812 {
2813 	u_int slots;
2814 	int post = 0;
2815 	int i;
2816 
2817 	i = sc->last_rx_desc_filled;
2818 
2819 	for (slots = if_rxr_get(&sc->rx_ring, sc->num_rx_desc);
2820 	    slots > 0; slots--) {
2821 		if (++i == sc->num_rx_desc)
2822 			i = 0;
2823 
2824 		if (em_get_buf(sc, i) != 0)
2825 			break;
2826 
2827 		post = 1;
2828 	}
2829 
2830 	sc->last_rx_desc_filled = i;
2831 	if_rxr_put(&sc->rx_ring, slots);
2832 
2833 	return (post);
2834 }
2835 
2836 /*********************************************************************
2837  *
2838  *  This routine executes in interrupt context. It replenishes
2839 	 *  the mbufs in the descriptor ring and passes data that has been
2840 	 *  DMA'ed into host memory up to the network stack.
2841  *
2842  *********************************************************************/
2843 void
2844 em_rxeof(struct em_softc *sc)
2845 {
2846 	struct ifnet	    *ifp = &sc->interface_data.ac_if;
2847 	struct mbuf	    *m;
2848 	u_int8_t	    accept_frame = 0;
2849 	u_int8_t	    eop = 0;
2850 	u_int16_t	    len, desc_len, prev_len_adj;
2851 	int		    i;
2852 
2853 	/* Pointer to the receive descriptor being examined. */
2854 	struct em_rx_desc   *desc;
2855 	struct em_buffer    *pkt;
2856 	u_int8_t	    status;
2857 
2858 	if (if_rxr_inuse(&sc->rx_ring) == 0)
2859 		return;
2860 
2861 	i = sc->next_rx_desc_to_check;
2862 
2863 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2864 	    0, sizeof(*desc) * sc->num_rx_desc,
2865 	    BUS_DMASYNC_POSTREAD);
2866 
2867 	do {
2868 		m = NULL;
2869 
2870 		desc = &sc->rx_desc_base[i];
2871 		pkt = &sc->rx_buffer_area[i];
2872 
2873 		status = desc->status;
2874 		if (!ISSET(status, E1000_RXD_STAT_DD))
2875 			break;
2876 
2877 		/* pull the mbuf off the ring */
2878 		bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
2879 		    BUS_DMASYNC_POSTREAD);
2880 		bus_dmamap_unload(sc->rxtag, pkt->map);
2881 		m = pkt->m_head;
2882 		pkt->m_head = NULL;
2883 
2884 		if (m == NULL) {
2885 			panic("em_rxeof: NULL mbuf in slot %d "
2886 			    "(nrx %d, filled %d)", i,
2887 			    if_rxr_inuse(&sc->rx_ring),
2888 			    sc->last_rx_desc_filled);
2889 		}
2890 
2891 		if_rxr_put(&sc->rx_ring, 1);
2892 
2893 		accept_frame = 1;
2894 		prev_len_adj = 0;
2895 		desc_len = letoh16(desc->length);
2896 
2897 		if (status & E1000_RXD_STAT_EOP) {
2898 			eop = 1;
2899 			if (desc_len < ETHER_CRC_LEN) {
2900 				len = 0;
2901 				prev_len_adj = ETHER_CRC_LEN - desc_len;
2902 			} else if (sc->hw.mac_type == em_i210 ||
2903 			    sc->hw.mac_type == em_i350)
2904 				len = desc_len;
2905 			else
2906 				len = desc_len - ETHER_CRC_LEN;
2907 		} else {
2908 			eop = 0;
2909 			len = desc_len;
2910 		}
2911 
2912 		if (desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2913 			u_int8_t last_byte;
2914 			u_int32_t pkt_len = desc_len;
2915 
2916 			if (sc->fmp != NULL)
2917 				pkt_len += sc->fmp->m_pkthdr.len;
2918 
2919 			last_byte = *(mtod(m, caddr_t) + desc_len - 1);
2920 			if (TBI_ACCEPT(&sc->hw, status, desc->errors,
2921 			    pkt_len, last_byte)) {
2922 #ifndef SMALL_KERNEL
2923 				em_tbi_adjust_stats(&sc->hw, &sc->stats,
2924 				    pkt_len, sc->hw.mac_addr);
2925 #endif
2926 				if (len > 0)
2927 					len--;
2928 			} else
2929 				accept_frame = 0;
2930 		}
2931 
2932 		if (accept_frame) {
2933 			/* Assign correct length to the current fragment */
2934 			m->m_len = len;
2935 
2936 			em_realign(sc, m, &prev_len_adj); /* STRICT_ALIGN */
2937 
2938 			if (sc->fmp == NULL) {
2939 				m->m_pkthdr.len = m->m_len;
2940 				sc->fmp = m;	 /* Store the first mbuf */
2941 				sc->lmp = m;
2942 			} else {
2943 				/* Chain mbuf's together */
2944 				m->m_flags &= ~M_PKTHDR;
2945 				/*
2946 				 * Adjust length of previous mbuf in chain if
2947 				 * we received less than 4 bytes in the last
2948 				 * descriptor.
2949 				 */
2950 				if (prev_len_adj > 0) {
2951 					sc->lmp->m_len -= prev_len_adj;
2952 					sc->fmp->m_pkthdr.len -= prev_len_adj;
2953 				}
2954 				sc->lmp->m_next = m;
2955 				sc->lmp = m;
2956 				sc->fmp->m_pkthdr.len += m->m_len;
2957 			}
2958 
2959 			if (eop) {
2960 				ifp->if_ipackets++;
2961 
2962 				m = sc->fmp;
2963 				m->m_pkthdr.rcvif = ifp;
2964 
2965 				em_receive_checksum(sc, desc, m);
2966 #if NVLAN > 0
2967 				if (desc->status & E1000_RXD_STAT_VP) {
2968 					m->m_pkthdr.ether_vtag =
2969 					    letoh16(desc->special);
2970 					m->m_flags |= M_VLANTAG;
2971 				}
2972 #endif
2973 #if NBPFILTER > 0
2974 				if (ifp->if_bpf) {
2975 					bpf_mtap_ether(ifp->if_bpf, m,
2976 					    BPF_DIRECTION_IN);
2977 				}
2978 #endif
2979 
2980 				ether_input_mbuf(ifp, m);
2981 
2982 				sc->fmp = NULL;
2983 				sc->lmp = NULL;
2984 			}
2985 		} else {
2986 			sc->dropped_pkts++;
2987 
2988 			if (sc->fmp != NULL) {
2989  				m_freem(sc->fmp);
2990 				sc->fmp = NULL;
2991 				sc->lmp = NULL;
2992 			}
2993 
2994 			m_freem(m);
2995 		}
2996 
2997 		/* Advance our pointers to the next descriptor. */
2998 		if (++i == sc->num_rx_desc)
2999 			i = 0;
3000 	} while (if_rxr_inuse(&sc->rx_ring) > 0);
3001 
3002 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
3003 	    0, sizeof(*desc) * sc->num_rx_desc,
3004 	    BUS_DMASYNC_PREREAD);
3005 
3006 	sc->next_rx_desc_to_check = i;
3007 }
3008 
3009 /*********************************************************************
3010  *
3011  *  Verify that the hardware indicated that the checksum is valid.
3012 	 *  Inform the stack of the checksum status so that the stack
3013 	 *  doesn't spend time verifying the checksum again.
3014  *
3015  *********************************************************************/
3016 void
3017 em_receive_checksum(struct em_softc *sc, struct em_rx_desc *rx_desc,
3018     struct mbuf *mp)
3019 {
3020 	/* 82543 or newer only */
3021 	if ((sc->hw.mac_type < em_82543) ||
3022 	    /* Ignore Checksum bit is set */
3023 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3024 		mp->m_pkthdr.csum_flags = 0;
3025 		return;
3026 	}
3027 
3028 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3029 		/* Did it pass? */
3030 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3031 			/* IP Checksum Good */
3032 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
3033 
3034 		} else
3035 			mp->m_pkthdr.csum_flags = 0;
3036 	}
3037 
3038 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3039 		/* Did it pass? */
3040 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE))
3041 			mp->m_pkthdr.csum_flags |=
3042 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3043 	}
3044 }
3045 
3046 /*
3047  * This turns on the hardware offload of VLAN
3048  * tag insertion and stripping.
3049  */
3050 void
3051 em_enable_hw_vlans(struct em_softc *sc)
3052 {
3053 	uint32_t ctrl;
3054 
3055 	ctrl = E1000_READ_REG(&sc->hw, CTRL);
3056 	ctrl |= E1000_CTRL_VME;
3057 	E1000_WRITE_REG(&sc->hw, CTRL, ctrl);
3058 }
3059 
3060 void
3061 em_enable_intr(struct em_softc *sc)
3062 {
3063 	E1000_WRITE_REG(&sc->hw, IMS, (IMS_ENABLE_MASK));
3064 }
3065 
3066 void
3067 em_disable_intr(struct em_softc *sc)
3068 {
3069 	/*
3070 	 * The first version of the 82542 had an erratum where, when link
3071 	 * was forced, it would stay up even if the cable was disconnected.
3072 	 * Sequence errors were used to detect the disconnect and then
3073 	 * the driver would unforce the link.  This code is in the ISR.
3074 	 * For this to work correctly the Sequence error interrupt had
3075 	 * to be enabled all the time.
3076 	 */
3077 
3078 	if (sc->hw.mac_type == em_82542_rev2_0)
3079 		E1000_WRITE_REG(&sc->hw, IMC, (0xffffffff & ~E1000_IMC_RXSEQ));
3080 	else
3081 		E1000_WRITE_REG(&sc->hw, IMC, 0xffffffff);
3082 }
3083 
3084 void
3085 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3086 {
3087 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3088 	pcireg_t val;
3089 
3090 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3091 	if (reg & 0x2) {
3092 		val &= 0x0000ffff;
3093 		val |= (*value << 16);
3094 	} else {
3095 		val &= 0xffff0000;
3096 		val |= *value;
3097 	}
3098 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg & ~0x3, val);
3099 }
3100 
3101 void
3102 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3103 {
3104 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3105 	pcireg_t val;
3106 
3107 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3108 	if (reg & 0x2)
3109 		*value = (val >> 16) & 0xffff;
3110 	else
3111 		*value = val & 0xffff;
3112 }
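
/*
 * Worked example for the two helpers above, with a hypothetical
 * register offset: a 16-bit access to offset 0x06 becomes a 32-bit
 * access to dword 0x04; since 0x06 & 0x2 is set, the value lives in
 * the upper half (val >> 16).  Writes read-modify-write the whole
 * dword so the other 16-bit half is preserved.
 */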
3113 
3114 void
3115 em_pci_set_mwi(struct em_hw *hw)
3116 {
3117 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3118 
3119 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3120 		(hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE));
3121 }
3122 
3123 void
3124 em_pci_clear_mwi(struct em_hw *hw)
3125 {
3126 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3127 
3128 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3129 		(hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE));
3130 }
3131 
3132 /*
3133  * We may eventually really do this, but it's unnecessary
3134  * for now, so we just return unsupported.
3135  */
3136 int32_t
3137 em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3138 {
3139 	return (-E1000_NOT_IMPLEMENTED);
3140 }
3141 
3142 /*********************************************************************
3143 * 82544 Coexistence issue workaround.
3144 *    There are two issues.
3145 *       1. Transmit Hang issue.
3146 *    To detect this issue, the following equation can be used:
3147 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3148 *          If SUM[3:0] is between 1 and 4, we will have this issue.
3149 *
3150 *       2. DAC issue.
3151 *    To detect this issue, the following equation can be used:
3152 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3153 *          If SUM[3:0] is between 9 and 0xC, we will have this issue.
3154 *
3155 *
3156 *    WORKAROUND:
3157 *          Make sure we do not have an ending address terminator of
3158 *          1, 2, 3, 4 (Hang) or 9, 0xA, 0xB, 0xC (DAC).
3159 **********************************************************************/
3160 u_int32_t
3161 em_fill_descriptors(u_int64_t address, u_int32_t length,
3162     PDESC_ARRAY desc_array)
3163 {
3164 	/* The issue is sensitive to both length and address, so */
3165 	/* let us first check the address. */
3166 	u_int32_t safe_terminator;
3167 	if (length <= 4) {
3168 		desc_array->descriptor[0].address = address;
3169 		desc_array->descriptor[0].length = length;
3170 		desc_array->elements = 1;
3171 		return (desc_array->elements);
3172 	}
3173 	safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3174 	/* If it does not fall in 0x1-0x4 or 0x9-0xC, a single descriptor is safe */
3175 	if (safe_terminator == 0 ||
3176 	    (safe_terminator > 4 &&
3177 	    safe_terminator < 9) ||
3178 	    (safe_terminator > 0xC &&
3179 	    safe_terminator <= 0xF)) {
3180 		desc_array->descriptor[0].address = address;
3181 		desc_array->descriptor[0].length = length;
3182 		desc_array->elements = 1;
3183 		return (desc_array->elements);
3184 	}
3185 
3186 	desc_array->descriptor[0].address = address;
3187 	desc_array->descriptor[0].length = length - 4;
3188 	desc_array->descriptor[1].address = address + (length - 4);
3189 	desc_array->descriptor[1].length = 4;
3190 	desc_array->elements = 2;
3191 	return (desc_array->elements);
3192 }
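
/*
 * Worked example with hypothetical values: address = 0x1004 and
 * length = 0x40E give SUM[3:0] = (0x4 + 0xE) & 0xF = 0x2, inside the
 * 1-4 hang range, so the buffer is split: descriptor 0 covers 0x40A
 * bytes at 0x1004 (its own sum, (0x4 + 0xA) & 0xF = 0xE, is safe) and
 * descriptor 1 covers the final 4 bytes, which the length <= 4 early
 * return treats as inherently safe.
 */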
3193 
3194 #ifndef SMALL_KERNEL
3195 /**********************************************************************
3196  *
3197  *  Update the board statistics counters.
3198  *
3199  **********************************************************************/
3200 void
3201 em_update_stats_counters(struct em_softc *sc)
3202 {
3203 	struct ifnet   *ifp;
3204 
3205 	if (sc->hw.media_type == em_media_type_copper ||
3206 	    (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU)) {
3207 		sc->stats.symerrs += E1000_READ_REG(&sc->hw, SYMERRS);
3208 		sc->stats.sec += E1000_READ_REG(&sc->hw, SEC);
3209 	}
3210 	sc->stats.crcerrs += E1000_READ_REG(&sc->hw, CRCERRS);
3211 	sc->stats.mpc += E1000_READ_REG(&sc->hw, MPC);
3212 	sc->stats.scc += E1000_READ_REG(&sc->hw, SCC);
3213 	sc->stats.ecol += E1000_READ_REG(&sc->hw, ECOL);
3214 
3215 	sc->stats.mcc += E1000_READ_REG(&sc->hw, MCC);
3216 	sc->stats.latecol += E1000_READ_REG(&sc->hw, LATECOL);
3217 	sc->stats.colc += E1000_READ_REG(&sc->hw, COLC);
3218 	sc->stats.dc += E1000_READ_REG(&sc->hw, DC);
3219 	sc->stats.rlec += E1000_READ_REG(&sc->hw, RLEC);
3220 	sc->stats.xonrxc += E1000_READ_REG(&sc->hw, XONRXC);
3221 	sc->stats.xontxc += E1000_READ_REG(&sc->hw, XONTXC);
3222 	sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, XOFFRXC);
3223 	sc->stats.xofftxc += E1000_READ_REG(&sc->hw, XOFFTXC);
3224 	sc->stats.fcruc += E1000_READ_REG(&sc->hw, FCRUC);
3225 	sc->stats.prc64 += E1000_READ_REG(&sc->hw, PRC64);
3226 	sc->stats.prc127 += E1000_READ_REG(&sc->hw, PRC127);
3227 	sc->stats.prc255 += E1000_READ_REG(&sc->hw, PRC255);
3228 	sc->stats.prc511 += E1000_READ_REG(&sc->hw, PRC511);
3229 	sc->stats.prc1023 += E1000_READ_REG(&sc->hw, PRC1023);
3230 	sc->stats.prc1522 += E1000_READ_REG(&sc->hw, PRC1522);
3231 	sc->stats.gprc += E1000_READ_REG(&sc->hw, GPRC);
3232 	sc->stats.bprc += E1000_READ_REG(&sc->hw, BPRC);
3233 	sc->stats.mprc += E1000_READ_REG(&sc->hw, MPRC);
3234 	sc->stats.gptc += E1000_READ_REG(&sc->hw, GPTC);
3235 
3236 	/* For the 64-bit byte counters the low dword must be read first. */
3237 	/* Both registers clear on the read of the high dword */
3238 
3239 	sc->stats.gorcl += E1000_READ_REG(&sc->hw, GORCL);
3240 	sc->stats.gorch += E1000_READ_REG(&sc->hw, GORCH);
3241 	sc->stats.gotcl += E1000_READ_REG(&sc->hw, GOTCL);
3242 	sc->stats.gotch += E1000_READ_REG(&sc->hw, GOTCH);
3243 
3244 	sc->stats.rnbc += E1000_READ_REG(&sc->hw, RNBC);
3245 	sc->stats.ruc += E1000_READ_REG(&sc->hw, RUC);
3246 	sc->stats.rfc += E1000_READ_REG(&sc->hw, RFC);
3247 	sc->stats.roc += E1000_READ_REG(&sc->hw, ROC);
3248 	sc->stats.rjc += E1000_READ_REG(&sc->hw, RJC);
3249 
3250 	sc->stats.torl += E1000_READ_REG(&sc->hw, TORL);
3251 	sc->stats.torh += E1000_READ_REG(&sc->hw, TORH);
3252 	sc->stats.totl += E1000_READ_REG(&sc->hw, TOTL);
3253 	sc->stats.toth += E1000_READ_REG(&sc->hw, TOTH);
3254 
3255 	sc->stats.tpr += E1000_READ_REG(&sc->hw, TPR);
3256 	sc->stats.tpt += E1000_READ_REG(&sc->hw, TPT);
3257 	sc->stats.ptc64 += E1000_READ_REG(&sc->hw, PTC64);
3258 	sc->stats.ptc127 += E1000_READ_REG(&sc->hw, PTC127);
3259 	sc->stats.ptc255 += E1000_READ_REG(&sc->hw, PTC255);
3260 	sc->stats.ptc511 += E1000_READ_REG(&sc->hw, PTC511);
3261 	sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, PTC1023);
3262 	sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, PTC1522);
3263 	sc->stats.mptc += E1000_READ_REG(&sc->hw, MPTC);
3264 	sc->stats.bptc += E1000_READ_REG(&sc->hw, BPTC);
3265 
3266 	if (sc->hw.mac_type >= em_82543) {
3267 		sc->stats.algnerrc +=
3268 		E1000_READ_REG(&sc->hw, ALGNERRC);
3269 		sc->stats.rxerrc +=
3270 		E1000_READ_REG(&sc->hw, RXERRC);
3271 		sc->stats.tncrs +=
3272 		E1000_READ_REG(&sc->hw, TNCRS);
3273 		sc->stats.cexterr +=
3274 		E1000_READ_REG(&sc->hw, CEXTERR);
3275 		sc->stats.tsctc +=
3276 		E1000_READ_REG(&sc->hw, TSCTC);
3277 		sc->stats.tsctfc +=
3278 		E1000_READ_REG(&sc->hw, TSCTFC);
3279 	}
3280 	ifp = &sc->interface_data.ac_if;
3281 
3282 	/* Fill out the OS statistics structure */
3283 	ifp->if_collisions = sc->stats.colc;
3284 
3285 	/* Rx Errors */
3286 	ifp->if_ierrors =
3287 	    sc->dropped_pkts +
3288 	    sc->stats.rxerrc +
3289 	    sc->stats.crcerrs +
3290 	    sc->stats.algnerrc +
3291 	    sc->stats.ruc + sc->stats.roc +
3292 	    sc->stats.mpc + sc->stats.cexterr +
3293 	    sc->rx_overruns;
3294 
3295 	/* Tx Errors */
3296 	ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol +
3297 	    sc->watchdog_events;
3298 }
3299 
3300 #ifdef EM_DEBUG
3301 /**********************************************************************
3302  *
3303  *  This routine is called only when IFF_DEBUG is enabled.
3304  *  This routine provides a way to take a look at important statistics
3305  *  maintained by the driver and hardware.
3306  *
3307  **********************************************************************/
3308 void
3309 em_print_hw_stats(struct em_softc *sc)
3310 {
3311 	const char * const unit = sc->sc_dv.dv_xname;
3312 
3313 	printf("%s: Excessive collisions = %lld\n", unit,
3314 		(long long)sc->stats.ecol);
3315 	printf("%s: Symbol errors = %lld\n", unit,
3316 		(long long)sc->stats.symerrs);
3317 	printf("%s: Sequence errors = %lld\n", unit,
3318 		(long long)sc->stats.sec);
3319 	printf("%s: Defer count = %lld\n", unit,
3320 		(long long)sc->stats.dc);
3321 
3322 	printf("%s: Missed Packets = %lld\n", unit,
3323 		(long long)sc->stats.mpc);
3324 	printf("%s: Receive No Buffers = %lld\n", unit,
3325 		(long long)sc->stats.rnbc);
3326 	/* RLEC is inaccurate on some hardware, so calculate our own */
3327 	printf("%s: Receive Length Errors = %lld\n", unit,
3328 		((long long)sc->stats.roc +
3329 		(long long)sc->stats.ruc));
3330 	printf("%s: Receive errors = %lld\n", unit,
3331 		(long long)sc->stats.rxerrc);
3332 	printf("%s: Crc errors = %lld\n", unit,
3333 		(long long)sc->stats.crcerrs);
3334 	printf("%s: Alignment errors = %lld\n", unit,
3335 		(long long)sc->stats.algnerrc);
3336 	printf("%s: Carrier extension errors = %lld\n", unit,
3337 		(long long)sc->stats.cexterr);
3338 
3339 	printf("%s: RX overruns = %ld\n", unit,
3340 		sc->rx_overruns);
3341 	printf("%s: watchdog timeouts = %ld\n", unit,
3342 		sc->watchdog_events);
3343 
3344 	printf("%s: XON Rcvd = %lld\n", unit,
3345 		(long long)sc->stats.xonrxc);
3346 	printf("%s: XON Xmtd = %lld\n", unit,
3347 		(long long)sc->stats.xontxc);
3348 	printf("%s: XOFF Rcvd = %lld\n", unit,
3349 		(long long)sc->stats.xoffrxc);
3350 	printf("%s: XOFF Xmtd = %lld\n", unit,
3351 		(long long)sc->stats.xofftxc);
3352 
3353 	printf("%s: Good Packets Rcvd = %lld\n", unit,
3354 		(long long)sc->stats.gprc);
3355 	printf("%s: Good Packets Xmtd = %lld\n", unit,
3356 		(long long)sc->stats.gptc);
3357 }
3358 #endif
3359 #endif /* !SMALL_KERNEL */
3360