/**************************************************************************

Copyright (c) 2001-2003, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* $OpenBSD: if_em.c,v 1.289 2014/11/19 23:47:22 brad Exp $ */
/* $FreeBSD: if_em.c,v 1.46 2004/09/29 18:28:28 mlaier Exp $ */

#include <dev/pci/if_em.h>
#include <dev/pci/if_em_soc.h>

/*********************************************************************
 *  Driver version
 *********************************************************************/

#define EM_DRIVER_VERSION	"6.2.9"

/*********************************************************************
 *  PCI Device ID Table
 *********************************************************************/
const struct pci_matchid em_devices[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_DPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_DPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_CPR_SPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80003ES2LAN_SDS_SPT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_CPR_K },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_AT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_CPR_LP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FBR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SDS_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L_PL_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573V_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_CPR },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QP_PM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_CU_ET2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82577LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DC },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82578DM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82579V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_NF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_NF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_1GBPS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_BP_2_5GBPS },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I354_SGMII },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_82567V_3 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_G },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IFE_GT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_C },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH8_IGP_M_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_BM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_G },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IFE_GT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_C },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_AMT },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH9_IGP_M_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_D_BM_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LF },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_LM },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ICH10_R_BM_V },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_1 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_2 },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_EP80579_LAN_3 }
};
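/*
 * Note: the PCI_PRODUCT_INTEL_* identifiers above are generated from
 * sys/dev/pci/pcidevs; a new adapter needs an entry there before it
 * can be added to this match table.
 */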

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
int  em_probe(struct device *, void *, void *);
void em_attach(struct device *, struct device *, void *);
void em_defer_attach(struct device *);
int  em_detach(struct device *, int);
int  em_activate(struct device *, int);
int  em_intr(void *);
void em_start(struct ifnet *);
int  em_ioctl(struct ifnet *, u_long, caddr_t);
void em_watchdog(struct ifnet *);
void em_init(void *);
void em_stop(void *, int);
void em_media_status(struct ifnet *, struct ifmediareq *);
int  em_media_change(struct ifnet *);
int  em_flowstatus(struct em_softc *);
void em_identify_hardware(struct em_softc *);
int  em_allocate_pci_resources(struct em_softc *);
void em_free_pci_resources(struct em_softc *);
void em_local_timer(void *);
int  em_hardware_init(struct em_softc *);
void em_setup_interface(struct em_softc *);
int  em_setup_transmit_structures(struct em_softc *);
void em_initialize_transmit_unit(struct em_softc *);
int  em_setup_receive_structures(struct em_softc *);
void em_initialize_receive_unit(struct em_softc *);
void em_enable_intr(struct em_softc *);
void em_disable_intr(struct em_softc *);
void em_free_transmit_structures(struct em_softc *);
void em_free_receive_structures(struct em_softc *);
void em_update_stats_counters(struct em_softc *);
void em_txeof(struct em_softc *);
int  em_allocate_receive_structures(struct em_softc *);
int  em_allocate_transmit_structures(struct em_softc *);
#ifdef __STRICT_ALIGNMENT
void em_realign(struct em_softc *, struct mbuf *, u_int16_t *);
#else
#define em_realign(a, b, c) /* a, b, c */
#endif
int  em_rxfill(struct em_softc *);
void em_rxeof(struct em_softc *);
void em_receive_checksum(struct em_softc *, struct em_rx_desc *,
			 struct mbuf *);
void em_transmit_checksum_setup(struct em_softc *, struct mbuf *,
				u_int32_t *, u_int32_t *);
void em_iff(struct em_softc *);
#ifdef EM_DEBUG
void em_print_hw_stats(struct em_softc *);
#endif
void em_update_link_status(struct em_softc *);
int  em_get_buf(struct em_softc *, int);
void em_enable_hw_vlans(struct em_softc *);
int  em_encap(struct em_softc *, struct mbuf *);
void em_smartspeed(struct em_softc *);
int  em_82547_fifo_workaround(struct em_softc *, int);
void em_82547_update_fifo_head(struct em_softc *, int);
int  em_82547_tx_fifo_reset(struct em_softc *);
void em_82547_move_tail(void *arg);
void em_82547_move_tail_locked(struct em_softc *);
int  em_dma_malloc(struct em_softc *, bus_size_t, struct em_dma_alloc *,
		   int);
void em_dma_free(struct em_softc *, struct em_dma_alloc *);
u_int32_t em_fill_descriptors(u_int64_t address, u_int32_t length,
			      PDESC_ARRAY desc_array);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

struct cfattach em_ca = {
	sizeof(struct em_softc), em_probe, em_attach, em_detach,
	em_activate
};

struct cfdriver em_cd = {
	NULL, "em", DV_IFNET
};

static int em_smart_pwr_down = FALSE;

/*********************************************************************
 *  Device identification routine
 *
 *  em_probe determines whether the driver should be loaded for an
 *  adapter, based on the adapter's PCI vendor/device ID.
 *
 *  return 0 on no match, positive on match
 *********************************************************************/

int
em_probe(struct device *parent, void *match, void *aux)
{
	INIT_DEBUGOUT("em_probe: begin");

	return (pci_matchbyid((struct pci_attach_args *)aux, em_devices,
	    nitems(em_devices)));
}

void
em_defer_attach(struct device *self)
{
	struct em_softc *sc = (struct em_softc *)self;
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;
	void *gcu;

	if ((gcu = em_lookup_gcu(self)) == 0) {
		printf("%s: No GCU found, deferred attachment failed\n",
		    sc->sc_dv.dv_xname);

		if (sc->sc_intrhand)
			pci_intr_disestablish(pc, sc->sc_intrhand);
		sc->sc_intrhand = 0;

		em_stop(sc, 1);

		em_free_pci_resources(sc);
		em_dma_free(sc, &sc->rxdma);
		em_dma_free(sc, &sc->txdma);

		return;
	}

	sc->hw.gcu = gcu;

	em_attach_miibus(self);

	em_setup_interface(sc);

	em_update_link_status(sc);

	em_setup_link(&sc->hw);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *********************************************************************/

void
em_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct em_softc *sc;
	int tsize, rsize;
	int defer = 0;

	INIT_DEBUGOUT("em_attach: begin");

	sc = (struct em_softc *)self;
	sc->osdep.em_pa = *pa;

	timeout_set(&sc->timer_handle, em_local_timer, sc);
	timeout_set(&sc->tx_fifo_timer_handle, em_82547_move_tail, sc);

	/* Determine hardware revision */
	em_identify_hardware(sc);

	/*
	 * Only use MSI on the newer PCIe parts, with the exception
	 * of 82571/82572 due to "Byte Enables 2 and 3 Are Not Set" errata
	 */
	if (sc->hw.mac_type <= em_82572)
		sc->osdep.em_pa.pa_flags &= ~PCI_FLAGS_MSI_ENABLED;

	/* Parameters (to be read from user) */
	if (sc->hw.mac_type >= em_82544) {
		sc->num_tx_desc = EM_MAX_TXD;
		sc->num_rx_desc = EM_MAX_RXD;
	} else {
		sc->num_tx_desc = EM_MAX_TXD_82543;
		sc->num_rx_desc = EM_MAX_RXD_82543;
	}
	sc->tx_int_delay = EM_TIDV;
	sc->tx_abs_int_delay = EM_TADV;
	sc->rx_int_delay = EM_RDTR;
	sc->rx_abs_int_delay = EM_RADV;
	sc->hw.autoneg = DO_AUTO_NEG;
	sc->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
	sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	sc->hw.tbi_compatibility_en = TRUE;
	sc->rx_buffer_len = EM_RXBUFFER_2048;

	sc->hw.phy_init_script = 1;
	sc->hw.phy_reset_disable = FALSE;

#ifndef EM_MASTER_SLAVE
	sc->hw.master_slave = em_ms_hw_default;
#else
	sc->hw.master_slave = EM_MASTER_SLAVE;
#endif

	/*
	 * This controls when hardware reports transmit completion
	 * status.
	 */
	sc->hw.report_tx_early = 1;

	if (em_allocate_pci_resources(sc))
		goto err_pci;

	/* Initialize eeprom parameters */
	em_init_eeprom_params(&sc->hw);

	/*
	 * Set the max frame size assuming standard Ethernet
	 * sized frames.
	 */
	switch (sc->hw.mac_type) {
		case em_82573:
		{
			uint16_t	eeprom_data = 0;

			/*
			 * 82573 only supports Jumbo frames
			 * if ASPM is disabled.
			 */
			em_read_eeprom(&sc->hw, EEPROM_INIT_3GIO_3,
			    1, &eeprom_data);
			if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
				sc->hw.max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* Allow Jumbo frames */
			/* FALLTHROUGH */
		}
		case em_82571:
		case em_82572:
		case em_82574:
		case em_82575:
		case em_82580:
		case em_i210:
		case em_i350:
		case em_ich9lan:
		case em_ich10lan:
		case em_80003es2lan:
			/* Limit Jumbo Frame size */
			sc->hw.max_frame_size = 9234;
			break;
		case em_pchlan:
			sc->hw.max_frame_size = 4096;
			break;
		case em_82542_rev2_0:
		case em_82542_rev2_1:
		case em_ich8lan:
			/* Adapters that do not support Jumbo frames */
			sc->hw.max_frame_size = ETHER_MAX_LEN;
			break;
		default:
			sc->hw.max_frame_size =
			    MAX_JUMBO_FRAME_SIZE;
	}

	sc->hw.min_frame_size =
	    ETHER_MIN_LEN + ETHER_CRC_LEN;

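	/*
	 * Sizing sketch (assuming the 16-byte legacy descriptor
	 * layout): the ring's byte size is rounded up to a multiple
	 * of the maximum ring's byte size and then to a whole page,
	 * so the DMA allocations below are always page-aligned. The
	 * same math is applied to the receive ring further down.
	 */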
	if (sc->hw.mac_type >= em_82544)
	    tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
		EM_MAX_TXD * sizeof(struct em_tx_desc));
	else
	    tsize = EM_ROUNDUP(sc->num_tx_desc * sizeof(struct em_tx_desc),
		EM_MAX_TXD_82543 * sizeof(struct em_tx_desc));
	tsize = EM_ROUNDUP(tsize, PAGE_SIZE);

	/* Allocate Transmit Descriptor ring */
	if (em_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate tx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_tx_desc;
	}
	sc->tx_desc_base = (struct em_tx_desc *)sc->txdma.dma_vaddr;

	if (sc->hw.mac_type >= em_82544)
	    rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
		EM_MAX_RXD * sizeof(struct em_rx_desc));
	else
	    rsize = EM_ROUNDUP(sc->num_rx_desc * sizeof(struct em_rx_desc),
		EM_MAX_RXD_82543 * sizeof(struct em_rx_desc));
	rsize = EM_ROUNDUP(rsize, PAGE_SIZE);

	/* Allocate Receive Descriptor ring */
	if (em_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
		printf("%s: Unable to allocate rx_desc memory\n",
		       sc->sc_dv.dv_xname);
		goto err_rx_desc;
	}
	sc->rx_desc_base = (struct em_rx_desc *)sc->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if ((defer = em_hardware_init(sc))) {
		if (defer == EAGAIN)
			config_defer(self, em_defer_attach);
		else {
			printf("%s: Unable to initialize the hardware\n",
			    sc->sc_dv.dv_xname);
			goto err_hw_init;
		}
	}

	if (sc->hw.mac_type == em_80003es2lan || sc->hw.mac_type == em_82575 ||
	    sc->hw.mac_type == em_82580 || sc->hw.mac_type == em_i210 ||
	    sc->hw.mac_type == em_i350) {
		uint32_t reg = EM_READ_REG(&sc->hw, E1000_STATUS);
		sc->hw.bus_func = (reg & E1000_STATUS_FUNC_MASK) >>
		    E1000_STATUS_FUNC_SHIFT;

		switch (sc->hw.bus_func) {
		case 0:
			sc->hw.swfw = E1000_SWFW_PHY0_SM;
			break;
		case 1:
			sc->hw.swfw = E1000_SWFW_PHY1_SM;
			break;
		case 2:
			sc->hw.swfw = E1000_SWFW_PHY2_SM;
			break;
		case 3:
			sc->hw.swfw = E1000_SWFW_PHY3_SM;
			break;
		}
	} else {
		sc->hw.bus_func = 0;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (em_read_mac_addr(&sc->hw) < 0) {
		printf("%s: EEPROM read error while reading mac address\n",
		       sc->sc_dv.dv_xname);
		goto err_mac_addr;
	}

	bcopy(sc->hw.mac_addr, sc->interface_data.ac_enaddr,
	    ETHER_ADDR_LEN);

	/* Setup OS specific network interface */
	if (!defer)
		em_setup_interface(sc);

	/* Initialize statistics */
	em_clear_hw_cntrs(&sc->hw);
#ifndef SMALL_KERNEL
	em_update_stats_counters(sc);
#endif
	sc->hw.get_link_status = 1;
	if (!defer)
		em_update_link_status(sc);

	printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

	/* Indicate SOL/IDER usage */
	if (em_check_phy_reset_block(&sc->hw))
		printf("%s: PHY reset is blocked due to SOL/IDER session.\n",
		    sc->sc_dv.dv_xname);

	/* Identify 82544 on PCI-X */
	em_get_bus_info(&sc->hw);
	if (sc->hw.bus_type == em_bus_type_pcix &&
	    sc->hw.mac_type == em_82544)
		sc->pcix_82544 = TRUE;
	else
		sc->pcix_82544 = FALSE;

	sc->hw.icp_xxxx_is_link_up = FALSE;

	INIT_DEBUGOUT("em_attach: end");
	return;

err_mac_addr:
err_hw_init:
	em_dma_free(sc, &sc->rxdma);
err_rx_desc:
	em_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
	em_free_pci_resources(sc);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  em_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  If resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

void
em_start(struct ifnet *ifp)
{
	struct mbuf    *m_head;
	struct em_softc *sc = ifp->if_softc;
	int		post = 0;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	if (!sc->link_active)
		return;

	if (sc->hw.mac_type != em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (em_encap(sc, m_head)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
		/* Send a copy of the frame to the BPF listener */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		/* Set timeout in case hardware has problems transmitting */
		ifp->if_timer = EM_TX_TIMEOUT;

		post = 1;
	}

	if (sc->hw.mac_type != em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Advance the Transmit Descriptor Tail (Tdt);
		 * this tells the E1000 that this frame is
		 * available to transmit.
		 */
		if (post)
			E1000_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
	}
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  em_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	int		error = 0;
	struct ifreq   *ifr = (struct ifreq *)data;
	struct ifaddr  *ifa = (struct ifaddr *)data;
	struct em_softc *sc = ifp->if_softc;
	int s;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
			       "Addr)");
		if (!(ifp->if_flags & IFF_UP)) {
			ifp->if_flags |= IFF_UP;
			em_init(sc);
		}
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->interface_data, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				em_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				em_stop(sc, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (em_check_phy_reset_block(&sc->hw)) {
			printf("%s: Media change is blocked due to SOL/IDER session.\n",
			    sc->sc_dv.dv_xname);
			break;
		}
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->interface_data, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			em_disable_intr(sc);
			em_iff(sc);
			if (sc->hw.mac_type == em_82542_rev2_0)
				em_initialize_receive_unit(sc);
			em_enable_intr(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

void
em_watchdog(struct ifnet *ifp)
{
	struct em_softc *sc = ifp->if_softc;

	/* If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		return;
	}
	printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);

	em_init(sc);

	sc->watchdog_events++;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to
 *  get to a consistent state.
 *
 **********************************************************************/

void
em_init(void *arg)
{
	struct em_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;
	uint32_t	pba;
	int s;

	s = splnet();

	INIT_DEBUGOUT("em_init: begin");

	em_stop(sc, 0);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 *   Note: default does not leave enough room for Jumbo Frame >10k.
	 */
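	/*
	 * Worked example (assuming the E1000_PBA_* values name KB
	 * units): on an 82547 with jumbo frames, pba = E1000_PBA_22K
	 * below, so tx_fifo_size is derived from the remaining
	 * 40K - 22K = 18K of the packet buffer.
	 */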
	switch (sc->hw.mac_type) {
	case em_82547:
	case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		sc->tx_fifo_head = 0;
		sc->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		sc->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;
	case em_82571:
	case em_82572: /* Total Packet Buffer on these is 48k */
	case em_82575:
	case em_82580:
	case em_80003es2lan:
	case em_i350:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;
	case em_i210:
		pba = E1000_PBA_34K;
		break;
	case em_82573: /* 82573: Total Packet Buffer is 32K */
		/* Jumbo frames not supported */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;
	case em_82574: /* Total Packet Buffer is 40k */
		pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		break;
	case em_ich8lan:
		pba = E1000_PBA_8K;
		break;
	case em_ich9lan:
	case em_ich10lan:
		pba = E1000_PBA_10K;
		break;
	case em_pchlan:
	case em_pch2lan:
	case em_pch_lpt:
		pba = E1000_PBA_26K;
		break;
	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (sc->hw.max_frame_size > EM_RXBUFFER_8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	INIT_DEBUGOUT1("em_init: pba=%dK", pba);
	E1000_WRITE_REG(&sc->hw, PBA, pba);

	/* Get the latest MAC address; the user may have set a LAA */
	bcopy(sc->interface_data.ac_enaddr, sc->hw.mac_addr,
	      ETHER_ADDR_LEN);

	/* Initialize the hardware */
	if (em_hardware_init(sc)) {
		printf("%s: Unable to initialize the hardware\n",
		       sc->sc_dv.dv_xname);
		splx(s);
		return;
	}
	em_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, VET, ETHERTYPE_VLAN);
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		em_enable_hw_vlans(sc);

	/* Prepare transmit descriptors and buffers */
	if (em_setup_transmit_structures(sc)) {
		printf("%s: Could not setup transmit structures\n",
		       sc->sc_dv.dv_xname);
		em_stop(sc, 0);
		splx(s);
		return;
	}
	em_initialize_transmit_unit(sc);

	/* Prepare receive descriptors and buffers */
	if (em_setup_receive_structures(sc)) {
		printf("%s: Could not setup receive structures\n",
		       sc->sc_dv.dv_xname);
		em_stop(sc, 0);
		splx(s);
		return;
	}
	em_initialize_receive_unit(sc);

	/* Program promiscuous mode and multicast filters. */
	em_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	timeout_add_sec(&sc->timer_handle, 1);
	em_clear_hw_cntrs(&sc->hw);
	em_enable_intr(sc);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy_reset_disable = TRUE;

	splx(s);
}

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/
int
em_intr(void *arg)
{
	struct em_softc	*sc = arg;
	struct ifnet	*ifp = &sc->interface_data.ac_if;
	u_int32_t	reg_icr, test_icr;
	int		refill = 0;

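	/*
	 * Reading ICR clears it. On 82571 and later, the INT_ASSERTED
	 * bit indicates whether this device actually raised the
	 * interrupt, so only that bit is tested there; this avoids
	 * claiming interrupts meant for other devices on a shared
	 * line.
	 */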
	test_icr = reg_icr = E1000_READ_REG(&sc->hw, ICR);
	if (sc->hw.mac_type >= em_82571)
		test_icr = (reg_icr & E1000_ICR_INT_ASSERTED);
	if (!test_icr)
		return (0);

	if (ifp->if_flags & IFF_RUNNING) {
		em_rxeof(sc);
		em_txeof(sc);
		refill = 1;
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		sc->hw.get_link_status = 1;
		em_check_for_link(&sc->hw);
		em_update_link_status(sc);
	}

	if (reg_icr & E1000_ICR_RXO) {
		sc->rx_overruns++;
		refill = 1;
	}

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		em_start(ifp);

	if (refill && em_rxfill(sc)) {
		/* Advance the Rx Queue #0 "Tail Pointer". */
		E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
	}

	return (1);
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct em_softc *sc = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;
	u_int16_t gsr;

	INIT_DEBUGOUT("em_media_status: begin");

	em_check_for_link(&sc->hw);
	em_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.media_type == em_media_type_fiber ||
	    sc->hw.media_type == em_media_type_internal_serdes) {
		if (sc->hw.mac_type == em_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}

		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= em_flowstatus(sc) | IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_1000_T) {
			em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &gsr);
			if (gsr & SR_1000T_MS_CONFIG_RES)
				ifmr->ifm_active |= IFM_ETH_MASTER;
		}
	}
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt options of ifconfig.
 *
 **********************************************************************/
int
em_media_change(struct ifnet *ifp)
{
	struct em_softc *sc = ifp->if_softc;
	struct ifmedia	*ifm = &sc->media;

	INIT_DEBUGOUT("em_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.autoneg = DO_AUTO_NEG;
		sc->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.autoneg = DO_AUTO_NEG;
		sc->hw.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case IFM_100_TX:
		sc->hw.autoneg = FALSE;
		sc->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.forced_speed_duplex = em_100_full;
		else
			sc->hw.forced_speed_duplex = em_100_half;
		break;
	case IFM_10_T:
		sc->hw.autoneg = FALSE;
		sc->hw.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.forced_speed_duplex = em_10_full;
		else
			sc->hw.forced_speed_duplex = em_10_half;
		break;
	default:
		printf("%s: Unsupported media type\n", sc->sc_dv.dv_xname);
	}

	/*
	 * As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	sc->hw.phy_reset_disable = FALSE;

	em_init(sc);

	return (0);
}

int
em_flowstatus(struct em_softc *sc)
{
	u_int16_t ar, lpar;

	if (sc->hw.media_type == em_media_type_fiber ||
	    sc->hw.media_type == em_media_type_internal_serdes)
		return (0);

	em_read_phy_reg(&sc->hw, PHY_AUTONEG_ADV, &ar);
	em_read_phy_reg(&sc->hw, PHY_LP_ABILITY, &lpar);

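	/*
	 * Resolve pause capability from the advertised PAUSE/ASM_DIR
	 * bits along the lines of IEEE 802.3 Annex 28B: if both sides
	 * advertise PAUSE the link is symmetric; otherwise the ASM_DIR
	 * combinations below select transmit-only or receive-only
	 * pause.
	 */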
	if ((ar & NWAY_AR_PAUSE) && (lpar & NWAY_LPAR_PAUSE))
		return (IFM_FLOW|IFM_ETH_TXPAUSE|IFM_ETH_RXPAUSE);
	else if (!(ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
		(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
		return (IFM_FLOW|IFM_ETH_TXPAUSE);
	else if ((ar & NWAY_AR_PAUSE) && (ar & NWAY_AR_ASM_DIR) &&
		!(lpar & NWAY_LPAR_PAUSE) && (lpar & NWAY_LPAR_ASM_DIR))
		return (IFM_FLOW|IFM_ETH_RXPAUSE);

	return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
int
em_encap(struct em_softc *sc, struct mbuf *m_head)
{
	u_int32_t	txd_upper;
	u_int32_t	txd_lower, txd_used = 0, txd_saved = 0;
	int		i, j, first, error = 0, last = 0;
	bus_dmamap_t	map;

	/* For 82544 Workaround */
	DESC_ARRAY		desc_array;
	u_int32_t		array_elements;
	u_int32_t		counter;

	struct em_buffer   *tx_buffer, *tx_buffer_mapped;
	struct em_tx_desc *current_tx_desc = NULL;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (sc->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_txeof(sc);
		/* Do we now have at least the minimum number free? */
		if (sc->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
			sc->no_tx_desc_avail1++;
			return (ENOBUFS);
		}
	}

	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	/*
	 * Map the packet for DMA.
	 *
	 * Capture the first descriptor index; this descriptor
	 * will hold the index of the EOP descriptor, which is
	 * the only one that now gets a DONE bit writeback.
	 */
	first = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	error = bus_dmamap_load_mbuf(sc->txtag, map, m_head, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		break;
	case EFBIG:
		if ((error = m_defrag(m_head, M_DONTWAIT)) == 0 &&
		    (error = bus_dmamap_load_mbuf(sc->txtag, map, m_head,
		     BUS_DMA_NOWAIT)) == 0)
			break;

		/* FALLTHROUGH */
	default:
		sc->no_tx_dma_setup++;
		goto loaderr;
	}

	EM_KASSERT(map->dm_nsegs != 0, ("em_encap: empty packet"));

	if (map->dm_nsegs > sc->num_tx_desc_avail - 2)
		goto fail;

	if (sc->hw.mac_type >= em_82543 && sc->hw.mac_type != em_82575 &&
	    sc->hw.mac_type != em_82580 && sc->hw.mac_type != em_i210 &&
	    sc->hw.mac_type != em_i350)
		em_transmit_checksum_setup(sc, m_head, &txd_upper, &txd_lower);
	else
		txd_upper = txd_lower = 0;

	i = sc->next_avail_tx_desc;
	if (sc->pcix_82544)
		txd_saved = i;

	for (j = 0; j < map->dm_nsegs; j++) {
		/* If sc is 82544 and on PCI-X bus */
		if (sc->pcix_82544) {
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
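			/*
			 * (An 82544 on PCI-X can mishandle certain
			 * address/length combinations, so a single
			 * DMA segment may be split across several
			 * descriptors here.)
			 */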
			array_elements = em_fill_descriptors(map->dm_segs[j].ds_addr,
							     map->dm_segs[j].ds_len,
							     &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == sc->num_tx_desc_avail) {
					sc->next_avail_tx_desc = txd_saved;
					goto fail;
				}
				tx_buffer = &sc->tx_buffer_area[i];
				current_tx_desc = &sc->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					(sc->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32(txd_upper);
				last = i;
				if (++i == sc->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				tx_buffer->next_eop = -1;
				txd_used++;
			}
		} else {
			tx_buffer = &sc->tx_buffer_area[i];
			current_tx_desc = &sc->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(map->dm_segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				sc->txd_cmd | txd_lower | map->dm_segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);
			last = i;
			if (++i == sc->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
			tx_buffer->next_eop = -1;
		}
	}

	sc->next_avail_tx_desc = i;
	if (sc->pcix_82544)
		sc->num_tx_desc_avail -= txd_used;
	else
		sc->num_tx_desc_avail -= map->dm_nsegs;

#if NVLAN > 0
	/* Find out if we are in VLAN mode */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the VLAN id */
		current_tx_desc->upper.fields.special =
			htole16(m_head->m_pkthdr.ether_vtag);

		/* Tell hardware to add tag */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}
#endif

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;
	bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet
	 * needs End Of Packet (EOP)
	 * and Report Status (RS)
	 */
	current_tx_desc->lower.data |=
	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);

	/*
	 * Keep track in the first buffer which
	 * descriptor will be written back
	 */
	tx_buffer = &sc->tx_buffer_area[first];
	tx_buffer->next_eop = last;

	/*
	 * Advance the Transmit Descriptor Tail (Tdt);
	 * this tells the E1000 that this frame is
	 * available to transmit.
	 */
	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		if (sc->link_duplex == HALF_DUPLEX)
			em_82547_move_tail_locked(sc);
		else {
			E1000_WRITE_REG(&sc->hw, TDT, i);
			em_82547_update_fifo_head(sc, m_head->m_pkthdr.len);
		}
	}

	return (0);

fail:
	sc->no_tx_desc_avail2++;
	bus_dmamap_unload(sc->txtag, map);
	error = ENOBUFS;
loaderr:
	if (sc->hw.mac_type == em_82547) {
		bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
		    sc->txdma.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (error);
}

/*********************************************************************
 *
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 * in this case. We do that only when FIFO is quiescent.
 *
 **********************************************************************/
void
em_82547_move_tail_locked(struct em_softc *sc)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	hw_tdt = E1000_READ_REG(&sc->hw, TDT);
	sw_tdt = sc->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &sc->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == sc->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(sc, length)) {
				sc->tx_fifo_wrk_cnt++;
				timeout_add(&sc->tx_fifo_timer_handle, 1);
				break;
			}
			E1000_WRITE_REG(&sc->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(sc, length);
			length = 0;
		}
	}
}

void
em_82547_move_tail(void *arg)
{
	struct em_softc *sc = arg;
	int s;

	s = splnet();
	em_82547_move_tail_locked(sc);
	splx(s);
}

int
em_82547_fifo_workaround(struct em_softc *sc, int len)
{
	int fifo_space, fifo_pkt_len;

	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (sc->link_duplex == HALF_DUPLEX) {
		fifo_space = sc->tx_fifo_size - sc->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(sc))
				return (0);
			else
				return (1);
		}
	}

	return (0);
}

void
em_82547_update_fifo_head(struct em_softc *sc, int len)
{
	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	sc->tx_fifo_head += fifo_pkt_len;
	if (sc->tx_fifo_head >= sc->tx_fifo_size)
		sc->tx_fifo_head -= sc->tx_fifo_size;
}
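
/*
 * Example of the FIFO accounting above (assuming EM_FIFO_HDR is 16
 * bytes): a 1514-byte frame consumes EM_ROUNDUP(1514 + 16, 16) = 1536
 * bytes of the Tx FIFO, and tx_fifo_head simply wraps modulo
 * tx_fifo_size.
 */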

int
em_82547_tx_fifo_reset(struct em_softc *sc)
{
	uint32_t tctl;

	if ((E1000_READ_REG(&sc->hw, TDT) ==
	     E1000_READ_REG(&sc->hw, TDH)) &&
	    (E1000_READ_REG(&sc->hw, TDFT) ==
	     E1000_READ_REG(&sc->hw, TDFH)) &&
	    (E1000_READ_REG(&sc->hw, TDFTS) ==
	     E1000_READ_REG(&sc->hw, TDFHS)) &&
	    (E1000_READ_REG(&sc->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&sc->hw, TCTL);
		E1000_WRITE_REG(&sc->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&sc->hw, TDFT, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFH, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFTS, sc->tx_head_addr);
		E1000_WRITE_REG(&sc->hw, TDFHS, sc->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&sc->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&sc->hw);

		sc->tx_fifo_head = 0;
		sc->tx_fifo_reset_cnt++;

		return (TRUE);
	} else
		return (FALSE);
}

void
em_iff(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;
	struct arpcom *ac = &sc->interface_data;
	u_int32_t reg_rctl = 0;
	u_int8_t  mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
	struct ether_multi *enm;
	struct ether_multistep step;
	int i = 0;

	IOCTL_DEBUGOUT("em_iff: begin");

	if (sc->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_clear_mwi(&sc->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
	}

	reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
	reg_rctl &= ~(E1000_RCTL_MPE | E1000_RCTL_UPE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg_rctl |= E1000_RCTL_MPE;
		if (ifp->if_flags & IFF_PROMISC)
			reg_rctl |= E1000_RCTL_UPE;
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			bcopy(enm->enm_addrlo, mta + i, ETH_LENGTH_OF_ADDRESS);
			i += ETH_LENGTH_OF_ADDRESS;

			ETHER_NEXT_MULTI(step, enm);
		}

		em_mc_addr_list_update(&sc->hw, mta, ac->ac_multicnt, 0, 1);
	}

	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);

	if (sc->hw.mac_type == em_82542_rev2_0) {
		reg_rctl = E1000_READ_REG(&sc->hw, RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
		msec_delay(5);
		if (sc->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			em_pci_set_mwi(&sc->hw);
	}
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

void
em_local_timer(void *arg)
{
	struct ifnet   *ifp;
	struct em_softc *sc = arg;
	int s;

	ifp = &sc->interface_data.ac_if;

	s = splnet();

#ifndef SMALL_KERNEL
	em_update_stats_counters(sc);
#ifdef EM_DEBUG
	if (ifp->if_flags & IFF_DEBUG && ifp->if_flags & IFF_RUNNING)
		em_print_hw_stats(sc);
#endif
#endif
	em_smartspeed(sc);

	timeout_add_sec(&sc->timer_handle, 1);

	splx(s);
}

void
em_update_link_status(struct em_softc *sc)
{
	struct ifnet *ifp = &sc->interface_data.ac_if;

	if (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU) {
		if (sc->link_active == 0) {
			em_get_speed_and_duplex(&sc->hw,
						&sc->link_speed,
						&sc->link_duplex);
			/* Check if we may set SPEED_MODE bit on PCI-E */
			if ((sc->link_speed == SPEED_1000) &&
			    ((sc->hw.mac_type == em_82571) ||
			    (sc->hw.mac_type == em_82572) ||
			    (sc->hw.mac_type == em_82575) ||
			    (sc->hw.mac_type == em_82580))) {
				int tarc0;

				tarc0 = E1000_READ_REG(&sc->hw, TARC0);
				tarc0 |= SPEED_MODE_BIT;
				E1000_WRITE_REG(&sc->hw, TARC0, tarc0);
			}
			sc->link_active = 1;
			sc->smartspeed = 0;
			ifp->if_baudrate = IF_Mbps(sc->link_speed);
		}
		if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
			if (sc->link_duplex == FULL_DUPLEX)
				ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
			else
				ifp->if_link_state = LINK_STATE_HALF_DUPLEX;
			if_link_state_change(ifp);
		}
	} else {
		if (sc->link_active == 1) {
			ifp->if_baudrate = sc->link_speed = 0;
			sc->link_duplex = 0;
			sc->link_active = 0;
		}
		if (ifp->if_link_state != LINK_STATE_DOWN) {
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

void
em_stop(void *arg, int softonly)
{
	struct em_softc *sc = arg;
	struct ifnet   *ifp = &sc->interface_data.ac_if;

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	INIT_DEBUGOUT("em_stop: begin");

	timeout_del(&sc->timer_handle);
	timeout_del(&sc->tx_fifo_timer_handle);

	if (!softonly) {
		em_disable_intr(sc);
		em_reset_hw(&sc->hw);
	}

	em_free_transmit_structures(sc);
	em_free_receive_structures(sc);
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
void
em_identify_hardware(struct em_softc *sc)
{
	u_int32_t reg;
	struct pci_attach_args *pa = &sc->osdep.em_pa;

	/* Make sure our PCI config space has the necessary stuff set */
	sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
					    PCI_COMMAND_STATUS_REG);

	/* Save off the information about this board */
	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
	sc->hw.revision_id = PCI_REVISION(reg);

	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
	sc->hw.subsystem_id = PCI_PRODUCT(reg);

	/* Identify the MAC */
	if (em_set_mac_type(&sc->hw))
		printf("%s: Unknown MAC Type\n", sc->sc_dv.dv_xname);

	if (sc->hw.mac_type == em_pchlan)
		sc->hw.revision_id = PCI_PRODUCT(pa->pa_id) & 0x0f;

	if (sc->hw.mac_type == em_82541 ||
	    sc->hw.mac_type == em_82541_rev_2 ||
	    sc->hw.mac_type == em_82547 ||
	    sc->hw.mac_type == em_82547_rev_2)
		sc->hw.phy_init_script = TRUE;
}

int
em_allocate_pci_resources(struct em_softc *sc)
{
	int		val, rid;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct pci_attach_args *pa = &sc->osdep.em_pa;
	pci_chipset_tag_t	pc = pa->pa_pc;

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_MMBA);
	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
		printf(": mmba is not mem space\n");
		return (ENXIO);
	}
	if (pci_mapreg_map(pa, EM_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
	    &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
	    &sc->osdep.em_membase, &sc->osdep.em_memsize, 0)) {
		printf(": cannot find mem space\n");
		return (ENXIO);
	}

	switch (sc->hw.mac_type) {
	case em_82544:
	case em_82540:
	case em_82545:
	case em_82546:
	case em_82541:
	case em_82541_rev_2:
		/* Figure out where our I/O BAR is */
		for (rid = PCI_MAPREG_START; rid < PCI_MAPREG_END;) {
			val = pci_conf_read(pa->pa_pc, pa->pa_tag, rid);
			if (PCI_MAPREG_TYPE(val) == PCI_MAPREG_TYPE_IO) {
				sc->io_rid = rid;
				break;
			}
			rid += 4;
			if (PCI_MAPREG_MEM_TYPE(val) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				rid += 4;	/* skip high bits, too */
		}

		if (pci_mapreg_map(pa, rid, PCI_MAPREG_TYPE_IO, 0,
		    &sc->osdep.io_bus_space_tag, &sc->osdep.io_bus_space_handle,
		    &sc->osdep.em_iobase, &sc->osdep.em_iosize, 0)) {
			printf(": cannot find i/o space\n");
			return (ENXIO);
		}

		sc->hw.io_base = 0;
		break;
	default:
		break;
	}

	/* for ICH8 and family we need to find the flash memory */
	if (IS_ICH8(sc->hw.mac_type)) {
		val = pci_conf_read(pa->pa_pc, pa->pa_tag, EM_FLASH);
		if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
			printf(": flash is not mem space\n");
			return (ENXIO);
		}

		if (pci_mapreg_map(pa, EM_FLASH, PCI_MAPREG_MEM_TYPE(val), 0,
		    &sc->osdep.flash_bus_space_tag, &sc->osdep.flash_bus_space_handle,
		    &sc->osdep.em_flashbase, &sc->osdep.em_flashsize, 0)) {
			printf(": cannot find mem space\n");
			return (ENXIO);
		}
	}

	if (pci_intr_map_msi(pa, &ih) && pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return (ENXIO);
	}

	sc->osdep.dev = (struct device *)sc;
	sc->hw.back = &sc->osdep;

	intrstr = pci_intr_string(pc, ih);
	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, em_intr, sc,
					      sc->sc_dv.dv_xname);
	if (sc->sc_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return (ENXIO);
	}
	printf(": %s", intrstr);

1702 	/*
1703 	 * The ICP_xxxx device has multiple, duplicate register sets for
1704 	 * use when it is being used as a network processor. Disable those
1705 	 * registers here, as they are not necessary in this context and
1706 	 * can confuse the system.
1707 	 */
1708 	if (sc->hw.mac_type == em_icp_xxxx) {
1709 		int offset;
1710 		pcireg_t val;
1711 
1712 		if (!pci_get_capability(sc->osdep.em_pa.pa_pc,
1713 		    sc->osdep.em_pa.pa_tag, PCI_CAP_ID_ST, &offset, &val)) {
1714 			return (0);
1715 		}
1716 		offset += PCI_ST_SMIA_OFFSET;
1717 		pci_conf_write(sc->osdep.em_pa.pa_pc, sc->osdep.em_pa.pa_tag,
1718 		    offset, 0x06);
1719 		E1000_WRITE_REG(&sc->hw, IMC1, ~0x0);
1720 		E1000_WRITE_REG(&sc->hw, IMC2, ~0x0);
1721 	}
1722 	return (0);
1723 }
1724 
1725 void
1726 em_free_pci_resources(struct em_softc *sc)
1727 {
1728 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1729 	pci_chipset_tag_t	pc = pa->pa_pc;
1730 
1731 	if (sc->sc_intrhand)
1732 		pci_intr_disestablish(pc, sc->sc_intrhand);
1733 	sc->sc_intrhand = 0;
1734 
1735 	if (sc->osdep.em_flashbase)
1736 		bus_space_unmap(sc->osdep.flash_bus_space_tag, sc->osdep.flash_bus_space_handle,
1737 				sc->osdep.em_flashsize);
1738 	sc->osdep.em_flashbase = 0;
1739 
1740 	if (sc->osdep.em_iobase)
1741 		bus_space_unmap(sc->osdep.io_bus_space_tag, sc->osdep.io_bus_space_handle,
1742 				sc->osdep.em_iosize);
1743 	sc->osdep.em_iobase = 0;
1744 
1745 	if (sc->osdep.em_membase)
1746 		bus_space_unmap(sc->osdep.mem_bus_space_tag, sc->osdep.mem_bus_space_handle,
1747 				sc->osdep.em_memsize);
1748 	sc->osdep.em_membase = 0;
1749 }
1750 
1751 /*********************************************************************
1752  *
1753  *  Initialize the hardware to a configuration as specified by the
1754  *  em_softc structure. The controller is reset, the EEPROM is
1755  *  verified, the MAC address is set, then the shared initialization
1756  *  routines are called.
1757  *
1758  **********************************************************************/
1759 int
1760 em_hardware_init(struct em_softc *sc)
1761 {
1762 	uint32_t ret_val;
1763 	u_int16_t rx_buffer_size;
1764 
1765 	INIT_DEBUGOUT("em_hardware_init: begin");
1766 	/* Issue a global reset */
1767 	em_reset_hw(&sc->hw);
1768 
1769 	/* When hardware is reset, fifo_head is also reset */
1770 	sc->tx_fifo_head = 0;
1771 
1772 	/* Make sure we have a good EEPROM before we read from it */
1773 	if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1774 		/*
1775 		 * Some PCIe parts fail the first check due to
1776 		 * the link being in sleep state; call it again, and
1777 		 * if it fails a second time it is a real issue.
1778 		 */
1779 		if (em_validate_eeprom_checksum(&sc->hw) < 0) {
1780 			printf("%s: The EEPROM Checksum Is Not Valid\n",
1781 			       sc->sc_dv.dv_xname);
1782 			return (EIO);
1783 		}
1784 	}
1785 
1786 	if (em_read_part_num(&sc->hw, &(sc->part_num)) < 0) {
1787 		printf("%s: EEPROM read error while reading part number\n",
1788 		       sc->sc_dv.dv_xname);
1789 		return (EIO);
1790 	}
1791 
1792 	/* Set up smart power down as default off on newer adapters */
1793 	if (!em_smart_pwr_down &&
1794 	     (sc->hw.mac_type == em_82571 ||
1795 	      sc->hw.mac_type == em_82572 ||
1796 	      sc->hw.mac_type == em_82575 ||
1797 	      sc->hw.mac_type == em_82580 ||
1798 	      sc->hw.mac_type == em_i210 ||
1799 	      sc->hw.mac_type == em_i350 )) {
1800 		uint16_t phy_tmp = 0;
1801 
1802 		/* Speed up time to link by disabling smart power down */
1803 		em_read_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1804 		phy_tmp &= ~IGP02E1000_PM_SPD;
1805 		em_write_phy_reg(&sc->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1806 	}
1807 
1808 	/*
1809 	 * These parameters control the automatic generation (Tx) and
1810 	 * response (Rx) to Ethernet PAUSE frames.
1811 	 * - High water mark should allow for at least two frames to be
1812 	 *   received after sending an XOFF.
1813 	 * - Low water mark works best when it is very near the high water mark.
1814 	 *   This allows the receiver to restart by sending XON when it has
1815 	 *   drained a bit.  Here we use an arbitrary value of 1500, which will
1816 	 *   restart after one full frame is pulled from the buffer.  There
1817 	 *   could be several smaller frames in the buffer, and if so they will
1818 	 *   not trigger the XON until their combined size reduces the buffer
1819 	 *   by 1500 bytes.
1820 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1821 	 */
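	/*
	 * Worked example with illustrative values (not read from any
	 * particular adapter), assuming EM_ROUNDUP rounds up to the
	 * next 1024-byte multiple: a 48 KB packet buffer (PBA low word
	 * 0x30) and a 1518-byte max frame give rx_buffer_size =
	 * 0x30 << 10 = 49152, fc_high_water = 49152 -
	 * EM_ROUNDUP(1518, 1024) = 49152 - 2048 = 47104, and
	 * fc_low_water = 47104 - 1500 = 45604.
	 */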
1822 	rx_buffer_size = ((E1000_READ_REG(&sc->hw, PBA) & 0xffff) << 10);
1823 
1824 	sc->hw.fc_high_water = rx_buffer_size -
1825 	    EM_ROUNDUP(sc->hw.max_frame_size, 1024);
1826 	sc->hw.fc_low_water = sc->hw.fc_high_water - 1500;
1827 	if (sc->hw.mac_type == em_80003es2lan)
1828 		sc->hw.fc_pause_time = 0xFFFF;
1829 	else
1830 		sc->hw.fc_pause_time = 1000;
1831 	sc->hw.fc_send_xon = TRUE;
1832 	sc->hw.fc = E1000_FC_FULL;
1833 
1834 	if ((ret_val = em_init_hw(&sc->hw)) != 0) {
1835 		if (ret_val == E1000_DEFER_INIT) {
1836 			INIT_DEBUGOUT("\nHardware Initialization Deferred ");
1837 			return (EAGAIN);
1838 		}
1839 		printf("%s: Hardware Initialization Failed",
1840 		       sc->sc_dv.dv_xname);
1841 		return (EIO);
1842 	}
1843 
1844 	em_check_for_link(&sc->hw);
1845 
1846 	return (0);
1847 }
1848 
1849 /*********************************************************************
1850  *
1851  *  Setup networking device structure and register an interface.
1852  *
1853  **********************************************************************/
1854 void
1855 em_setup_interface(struct em_softc *sc)
1856 {
1857 	struct ifnet   *ifp;
1858 	u_char fiber_type = IFM_1000_SX;
1859 
1860 	INIT_DEBUGOUT("em_setup_interface: begin");
1861 
1862 	ifp = &sc->interface_data.ac_if;
1863 	strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);
1864 	ifp->if_softc = sc;
1865 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1866 	ifp->if_ioctl = em_ioctl;
1867 	ifp->if_start = em_start;
1868 	ifp->if_watchdog = em_watchdog;
1869 	ifp->if_hardmtu =
1870 		sc->hw.max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN;
1871 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1872 	IFQ_SET_READY(&ifp->if_snd);
1873 
1874 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1875 
1876 #if NVLAN > 0
1877 	if (sc->hw.mac_type != em_82575 && sc->hw.mac_type != em_82580 &&
1878 	    sc->hw.mac_type != em_i210 && sc->hw.mac_type != em_i350)
1879 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1880 #endif
1881 
1882 	if (sc->hw.mac_type >= em_82543 && sc->hw.mac_type != em_82575 &&
1883 	    sc->hw.mac_type != em_82580 && sc->hw.mac_type != em_i210 &&
1884 	    sc->hw.mac_type != em_i350)
1885 		ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1886 
1887 	/*
1888 	 * Specify the media types supported by this adapter and register
1889 	 * callbacks to update media and link information
1890 	 */
1891 	ifmedia_init(&sc->media, IFM_IMASK, em_media_change,
1892 		     em_media_status);
1893 	if (sc->hw.media_type == em_media_type_fiber ||
1894 	    sc->hw.media_type == em_media_type_internal_serdes) {
1895 		if (sc->hw.mac_type == em_82545)
1896 			fiber_type = IFM_1000_LX;
1897 		ifmedia_add(&sc->media, IFM_ETHER | fiber_type | IFM_FDX,
1898 			    0, NULL);
1899 		ifmedia_add(&sc->media, IFM_ETHER | fiber_type,
1900 			    0, NULL);
1901 	} else {
1902 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1903 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1904 			    0, NULL);
1905 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX,
1906 			    0, NULL);
1907 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1908 			    0, NULL);
1909 		if (sc->hw.phy_type != em_phy_ife) {
1910 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1911 				    0, NULL);
1912 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1913 		}
1914 	}
1915 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1916 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1917 
1918 	if_attach(ifp);
1919 	ether_ifattach(ifp);
1920 }
1921 
1922 int
1923 em_detach(struct device *self, int flags)
1924 {
1925 	struct em_softc *sc = (struct em_softc *)self;
1926 	struct ifnet *ifp = &sc->interface_data.ac_if;
1927 	struct pci_attach_args *pa = &sc->osdep.em_pa;
1928 	pci_chipset_tag_t	pc = pa->pa_pc;
1929 
1930 	if (sc->sc_intrhand)
1931 		pci_intr_disestablish(pc, sc->sc_intrhand);
1932 	sc->sc_intrhand = 0;
1933 
1934 	em_stop(sc, 1);
1935 
1936 	em_free_pci_resources(sc);
1937 	em_dma_free(sc, &sc->rxdma);
1938 	em_dma_free(sc, &sc->txdma);
1939 
1940 	ether_ifdetach(ifp);
1941 	if_detach(ifp);
1942 
1943 	return (0);
1944 }
1945 
1946 int
1947 em_activate(struct device *self, int act)
1948 {
1949 	struct em_softc *sc = (struct em_softc *)self;
1950 	struct ifnet *ifp = &sc->interface_data.ac_if;
1951 	int rv = 0;
1952 
1953 	switch (act) {
1954 	case DVACT_SUSPEND:
1955 		if (ifp->if_flags & IFF_RUNNING)
1956 			em_stop(sc, 0);
1957 		/* We have no children at the moment, but we will soon */
1958 		rv = config_activate_children(self, act);
1959 		break;
1960 	case DVACT_RESUME:
1961 		if (ifp->if_flags & IFF_UP)
1962 			em_init(sc);
1963 		break;
1964 	default:
1965 		rv = config_activate_children(self, act);
1966 		break;
1967 	}
1968 	return (rv);
1969 }
1970 
1971 /*********************************************************************
1972  *
1973  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1974  *
1975  **********************************************************************/
1976 void
1977 em_smartspeed(struct em_softc *sc)
1978 {
1979 	uint16_t phy_tmp;
1980 
1981 	if (sc->link_active || (sc->hw.phy_type != em_phy_igp) ||
1982 	    !sc->hw.autoneg || !(sc->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1983 		return;
1984 
1985 	if (sc->smartspeed == 0) {
1986 		/* If the Master/Slave config fault is asserted twice,
1987 		 * we assume the faults are back-to-back. */
1988 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1989 		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1990 			return;
1991 		em_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
1992 		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1993 			em_read_phy_reg(&sc->hw, PHY_1000T_CTRL,
1994 					&phy_tmp);
1995 			if (phy_tmp & CR_1000T_MS_ENABLE) {
1996 				phy_tmp &= ~CR_1000T_MS_ENABLE;
1997 				em_write_phy_reg(&sc->hw,
1998 						    PHY_1000T_CTRL, phy_tmp);
1999 				sc->smartspeed++;
2000 				if (sc->hw.autoneg &&
2001 				    !em_phy_setup_autoneg(&sc->hw) &&
2002 				    !em_read_phy_reg(&sc->hw, PHY_CTRL,
2003 						       &phy_tmp)) {
2004 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
2005 						    MII_CR_RESTART_AUTO_NEG);
2006 					em_write_phy_reg(&sc->hw,
2007 							 PHY_CTRL, phy_tmp);
2008 				}
2009 			}
2010 		}
2011 		return;
2012 	} else if (sc->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2013 		/* If still no link, perhaps using 2/3 pair cable */
2014 		em_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
2015 		phy_tmp |= CR_1000T_MS_ENABLE;
2016 		em_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
2017 		if (sc->hw.autoneg &&
2018 		    !em_phy_setup_autoneg(&sc->hw) &&
2019 		    !em_read_phy_reg(&sc->hw, PHY_CTRL, &phy_tmp)) {
2020 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
2021 				    MII_CR_RESTART_AUTO_NEG);
2022 			em_write_phy_reg(&sc->hw, PHY_CTRL, phy_tmp);
2023 		}
2024 	}
2025 	/* Restart process after EM_SMARTSPEED_MAX iterations */
2026 	if (sc->smartspeed++ == EM_SMARTSPEED_MAX)
2027 		sc->smartspeed = 0;
2028 }
2029 
2030 /*
2031  * Manage DMA'able memory.
2032  */
2033 int
2034 em_dma_malloc(struct em_softc *sc, bus_size_t size,
2035     struct em_dma_alloc *dma, int mapflags)
2036 {
2037 	int r;
2038 
2039 	dma->dma_tag = sc->osdep.em_pa.pa_dmat;
2040 	r = bus_dmamap_create(dma->dma_tag, size, 1,
2041 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
2042 	if (r != 0) {
2043 		printf("%s: em_dma_malloc: bus_dmamap_create failed; "
2044 			"error %u\n", sc->sc_dv.dv_xname, r);
2045 		goto fail_0;
2046 	}
2047 
2048 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
2049 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
2050 	if (r != 0) {
2051 		printf("%s: em_dma_malloc: bus_dmammem_alloc failed; "
2052 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
2053 			(unsigned long)size, r);
2054 		goto fail_1;
2055 	}
2056 
2057 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
2058 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
2059 	if (r != 0) {
2060 		printf("%s: em_dma_malloc: bus_dmammem_map failed; "
2061 			"size %lu, error %d\n", sc->sc_dv.dv_xname,
2062 			(unsigned long)size, r);
2063 		goto fail_2;
2064 	}
2065 
2066 	r = bus_dmamap_load(sc->osdep.em_pa.pa_dmat, dma->dma_map,
2067 			    dma->dma_vaddr, size, NULL,
2068 			    mapflags | BUS_DMA_NOWAIT);
2069 	if (r != 0) {
2070 		printf("%s: em_dma_malloc: bus_dmamap_load failed; "
2071 			"error %u\n", sc->sc_dv.dv_xname, r);
2072 		goto fail_3;
2073 	}
2074 
2075 	dma->dma_size = size;
2076 	return (0);
2077 
2078 fail_3:
2079 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
2080 fail_2:
2081 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2082 fail_1:
2083 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2084 fail_0:
2085 	dma->dma_map = NULL;
2086 	dma->dma_tag = NULL;
2087 
2088 	return (r);
2089 }
2090 
2091 void
2092 em_dma_free(struct em_softc *sc, struct em_dma_alloc *dma)
2093 {
2094 	if (dma->dma_tag == NULL)
2095 		return;
2096 
2097 	if (dma->dma_map != NULL) {
2098 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
2099 		    dma->dma_map->dm_mapsize,
2100 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2101 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2102 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
2103 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2104 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2105 	}
2106 	dma->dma_tag = NULL;
2107 }
2108 
2109 /*********************************************************************
2110  *
2111  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2112  *  the information needed to transmit a packet on the wire.
2113  *
2114  **********************************************************************/
2115 int
2116 em_allocate_transmit_structures(struct em_softc *sc)
2117 {
2118 	if (!(sc->tx_buffer_area = mallocarray(sc->num_tx_desc,
2119 	    sizeof(struct em_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2120 		printf("%s: Unable to allocate tx_buffer memory\n",
2121 		       sc->sc_dv.dv_xname);
2122 		return (ENOMEM);
2123 	}
2124 
2125 	return (0);
2126 }
2127 
2128 /*********************************************************************
2129  *
2130  *  Allocate and initialize transmit structures.
2131  *
2132  **********************************************************************/
2133 int
2134 em_setup_transmit_structures(struct em_softc *sc)
2135 {
2136 	struct  em_buffer *tx_buffer;
2137 	int error, i;
2138 
2139 	if ((error = em_allocate_transmit_structures(sc)) != 0)
2140 		goto fail;
2141 
2142 	bzero((void *)sc->tx_desc_base,
2143 	    sizeof(struct em_tx_desc) * sc->num_tx_desc);
2144 
2145 	sc->txtag = sc->osdep.em_pa.pa_dmat;
2146 
2147 	tx_buffer = sc->tx_buffer_area;
2148 	for (i = 0; i < sc->num_tx_desc; i++) {
2149 		error = bus_dmamap_create(sc->txtag, MAX_JUMBO_FRAME_SIZE,
2150 			    EM_MAX_SCATTER, MAX_JUMBO_FRAME_SIZE, 0,
2151 			    BUS_DMA_NOWAIT, &tx_buffer->map);
2152 		if (error != 0) {
2153 			printf("%s: Unable to create TX DMA map\n",
2154 			    sc->sc_dv.dv_xname);
2155 			goto fail;
2156 		}
2157 		tx_buffer++;
2158 	}
2159 
2160 	sc->next_avail_tx_desc = 0;
2161 	sc->next_tx_to_clean = 0;
2162 
2163 	/* Set number of descriptors available */
2164 	sc->num_tx_desc_avail = sc->num_tx_desc;
2165 
2166 	/* Set checksum context */
2167 	sc->active_checksum_context = OFFLOAD_NONE;
2168 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2169 	    sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2170 
2171 	return (0);
2172 
2173 fail:
2174 	em_free_transmit_structures(sc);
2175 	return (error);
2176 }
2177 
2178 /*********************************************************************
2179  *
2180  *  Enable transmit unit.
2181  *
2182  **********************************************************************/
2183 void
2184 em_initialize_transmit_unit(struct em_softc *sc)
2185 {
2186 	u_int32_t	reg_tctl, reg_tipg = 0;
2187 	u_int64_t	bus_addr;
2188 
2189 	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2190 
2191 	/* Setup the Base and Length of the Tx Descriptor Ring */
2192 	bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
2193 	E1000_WRITE_REG(&sc->hw, TDLEN,
2194 			sc->num_tx_desc *
2195 			sizeof(struct em_tx_desc));
2196 	E1000_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
2197 	E1000_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
2198 
2199 	/* Setup the HW Tx Head and Tail descriptor pointers */
2200 	E1000_WRITE_REG(&sc->hw, TDT, 0);
2201 	E1000_WRITE_REG(&sc->hw, TDH, 0);
2202 
2203 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
2204 		     E1000_READ_REG(&sc->hw, TDBAL),
2205 		     E1000_READ_REG(&sc->hw, TDLEN));
2206 
2207 	/* Set the default values for the Tx Inter Packet Gap timer */
2208 	switch (sc->hw.mac_type) {
2209 	case em_82542_rev2_0:
2210 	case em_82542_rev2_1:
2211 		reg_tipg = DEFAULT_82542_TIPG_IPGT;
2212 		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2213 		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2214 		break;
2215 	case em_80003es2lan:
2216 		reg_tipg = DEFAULT_82543_TIPG_IPGR1;
2217 		reg_tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2218 		break;
2219 	default:
2220 		if (sc->hw.media_type == em_media_type_fiber ||
2221 		    sc->hw.media_type == em_media_type_internal_serdes)
2222 			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2223 		else
2224 			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2225 		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2226 		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2227 	}
2228 
2229 
2230 	E1000_WRITE_REG(&sc->hw, TIPG, reg_tipg);
2231 	E1000_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);
2232 	if (sc->hw.mac_type >= em_82540)
2233 		E1000_WRITE_REG(&sc->hw, TADV, sc->tx_abs_int_delay);
2234 
2235 	/* Setup Transmit Descriptor Base Settings */
2236 	sc->txd_cmd = E1000_TXD_CMD_IFCS;
2237 
2238 	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2239 	    sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
2240 		/* 82575/6 need to enable the TX queue and lack the IDE bit */
2241 		reg_tctl = E1000_READ_REG(&sc->hw, TXDCTL);
2242 		reg_tctl |= E1000_TXDCTL_QUEUE_ENABLE;
2243 		E1000_WRITE_REG(&sc->hw, TXDCTL, reg_tctl);
2244 	} else if (sc->tx_int_delay > 0)
2245 		sc->txd_cmd |= E1000_TXD_CMD_IDE;
2246 
2247 	/* Program the Transmit Control Register */
2248 	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2249 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2250 	if (sc->hw.mac_type >= em_82571)
2251 		reg_tctl |= E1000_TCTL_MULR;
2252 	if (sc->link_duplex == FULL_DUPLEX)
2253 		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2254 	else
2255 		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2256 	/* This write will effectively turn on the transmit unit */
2257 	E1000_WRITE_REG(&sc->hw, TCTL, reg_tctl);
2258 }
2259 
2260 /*********************************************************************
2261  *
2262  *  Free all transmit related data structures.
2263  *
2264  **********************************************************************/
2265 void
2266 em_free_transmit_structures(struct em_softc *sc)
2267 {
2268 	struct em_buffer   *tx_buffer;
2269 	int		i;
2270 
2271 	INIT_DEBUGOUT("free_transmit_structures: begin");
2272 
2273 	if (sc->tx_buffer_area != NULL) {
2274 		tx_buffer = sc->tx_buffer_area;
2275 		for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2276 			if (tx_buffer->map != NULL &&
2277 			    tx_buffer->map->dm_nsegs > 0) {
2278 				bus_dmamap_sync(sc->txtag, tx_buffer->map,
2279 				    0, tx_buffer->map->dm_mapsize,
2280 				    BUS_DMASYNC_POSTWRITE);
2281 				bus_dmamap_unload(sc->txtag,
2282 				    tx_buffer->map);
2283 			}
2284 			if (tx_buffer->m_head != NULL) {
2285 				m_freem(tx_buffer->m_head);
2286 				tx_buffer->m_head = NULL;
2287 			}
2288 			if (tx_buffer->map != NULL) {
2289 				bus_dmamap_destroy(sc->txtag,
2290 				    tx_buffer->map);
2291 				tx_buffer->map = NULL;
2292 			}
2293 		}
2294 	}
2295 	if (sc->tx_buffer_area != NULL) {
2296 		free(sc->tx_buffer_area, M_DEVBUF, 0);
2297 		sc->tx_buffer_area = NULL;
2298 	}
2299 	if (sc->txtag != NULL)
2300 		sc->txtag = NULL;
2301 }
2302 
2303 /*********************************************************************
2304  *
2305  *  The offload context needs to be set when we transfer the first
2306  *  packet of a particular protocol (TCP/UDP). We change the
2307  *  context only if the protocol type changes.
2308  *
2309  **********************************************************************/
2310 void
2311 em_transmit_checksum_setup(struct em_softc *sc, struct mbuf *mp,
2312     u_int32_t *txd_upper, u_int32_t *txd_lower)
2313 {
2314 	struct em_context_desc *TXD;
2315 	struct em_buffer *tx_buffer;
2316 	int curr_txd;
2317 
2318 	if (mp->m_pkthdr.csum_flags) {
2319 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
2320 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2321 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2322 			if (sc->active_checksum_context == OFFLOAD_TCP_IP)
2323 				return;
2324 			else
2325 				sc->active_checksum_context = OFFLOAD_TCP_IP;
2326 		} else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
2327 			*txd_upper = E1000_TXD_POPTS_TXSM << 8;
2328 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2329 			if (sc->active_checksum_context == OFFLOAD_UDP_IP)
2330 				return;
2331 			else
2332 				sc->active_checksum_context = OFFLOAD_UDP_IP;
2333 		} else {
2334 			*txd_upper = 0;
2335 			*txd_lower = 0;
2336 			return;
2337 		}
2338 	} else {
2339 		*txd_upper = 0;
2340 		*txd_lower = 0;
2341 		return;
2342 	}
2343 
2344 	/* If we reach this point, the checksum offload context
2345 	 * needs to be reset.
2346 	 */
2347 	curr_txd = sc->next_avail_tx_desc;
2348 	tx_buffer = &sc->tx_buffer_area[curr_txd];
2349 	TXD = (struct em_context_desc *) &sc->tx_desc_base[curr_txd];
2350 
2351 	TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2352 	TXD->lower_setup.ip_fields.ipcso =
2353 	    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2354 	TXD->lower_setup.ip_fields.ipcse =
2355 	    htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2356 
2357 	TXD->upper_setup.tcp_fields.tucss =
2358 	    ETHER_HDR_LEN + sizeof(struct ip);
2359 	TXD->upper_setup.tcp_fields.tucse = htole16(0);
2360 
2361 	if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
2362 		TXD->upper_setup.tcp_fields.tucso =
2363 		    ETHER_HDR_LEN + sizeof(struct ip) +
2364 		    offsetof(struct tcphdr, th_sum);
2365 	} else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
2366 		TXD->upper_setup.tcp_fields.tucso =
2367 		    ETHER_HDR_LEN + sizeof(struct ip) +
2368 		    offsetof(struct udphdr, uh_sum);
2369 	}
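	/*
	 * Worked example, assuming a standard untagged Ethernet/IPv4
	 * frame with a 20-byte IP header and no options: ipcss = 14,
	 * ipcso = 14 + 10 = 24 (ip_sum), ipcse = 14 + 20 - 1 = 33,
	 * tucss = 34, and tucso = 34 + 16 = 50 for TCP (th_sum) or
	 * 34 + 6 = 40 for UDP (uh_sum).
	 */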
2370 
2371 	TXD->tcp_seg_setup.data = htole32(0);
2372 	TXD->cmd_and_length = htole32(sc->txd_cmd | E1000_TXD_CMD_DEXT);
2373 
2374 	tx_buffer->m_head = NULL;
2375 	tx_buffer->next_eop = -1;
2376 
2377 	if (++curr_txd == sc->num_tx_desc)
2378 		curr_txd = 0;
2379 
2380 	sc->num_tx_desc_avail--;
2381 	sc->next_avail_tx_desc = curr_txd;
2382 }
2383 
2384 /**********************************************************************
2385  *
2386  *  Examine each tx_buffer in the used queue. If the hardware is done
2387  *  processing the packet then free associated resources. The
2388  *  tx_buffer is put back on the free queue.
2389  *
2390  **********************************************************************/
2391 void
2392 em_txeof(struct em_softc *sc)
2393 {
2394 	int first, last, done, num_avail;
2395 	struct em_buffer *tx_buffer;
2396 	struct em_tx_desc   *tx_desc, *eop_desc;
2397 	struct ifnet   *ifp = &sc->interface_data.ac_if;
2398 
2399 	if (sc->num_tx_desc_avail == sc->num_tx_desc)
2400 		return;
2401 
2402 	num_avail = sc->num_tx_desc_avail;
2403 	first = sc->next_tx_to_clean;
2404 	tx_desc = &sc->tx_desc_base[first];
2405 	tx_buffer = &sc->tx_buffer_area[first];
2406 	last = tx_buffer->next_eop;
2407 	eop_desc = &sc->tx_desc_base[last];
2408 
2409 	/*
2410 	 * Get the index of the first descriptor AFTER the EOP
2411 	 * of the first packet; that way the inner while loop
2412 	 * below can use a simple comparison to know when the
2413 	 * packet's range of descriptors has been cleaned.
2414 	 */
2415 	if (++last == sc->num_tx_desc)
2416 		last = 0;
2417 	done = last;
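	/*
	 * For example, with a 256-descriptor ring and the EOP of the
	 * first packet at index 255, "last" wraps and "done" becomes 0.
	 */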
2418 
2419 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2420 	    sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2421 	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2422 		/* We clean the range of the packet */
2423 		while (first != done) {
2424 			tx_desc->upper.data = 0;
2425 			tx_desc->lower.data = 0;
2426 			num_avail++;
2427 
2428 			if (tx_buffer->m_head != NULL) {
2429 				ifp->if_opackets++;
2430 				if (tx_buffer->map->dm_nsegs > 0) {
2431 					bus_dmamap_sync(sc->txtag,
2432 					    tx_buffer->map, 0,
2433 					    tx_buffer->map->dm_mapsize,
2434 					    BUS_DMASYNC_POSTWRITE);
2435 					bus_dmamap_unload(sc->txtag,
2436 					    tx_buffer->map);
2437 				}
2438 				m_freem(tx_buffer->m_head);
2439 				tx_buffer->m_head = NULL;
2440 			}
2441 			tx_buffer->next_eop = -1;
2442 
2443 			if (++first == sc->num_tx_desc)
2444 				first = 0;
2445 
2446 			tx_buffer = &sc->tx_buffer_area[first];
2447 			tx_desc = &sc->tx_desc_base[first];
2448 		}
2449 		/* See if we can continue to the next packet */
2450 		last = tx_buffer->next_eop;
2451 		if (last != -1) {
2452 			eop_desc = &sc->tx_desc_base[last];
2453 			/* Get new done point */
2454 			if (++last == sc->num_tx_desc)
2455 				last = 0;
2456 			done = last;
2457 		} else
2458 			break;
2459 	}
2460 	bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
2461 	    sc->txdma.dma_map->dm_mapsize,
2462 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2463 
2464 	sc->next_tx_to_clean = first;
2465 
2466 	/*
2467 	 * If we have enough room, clear IFF_OACTIVE to tell the stack
2468 	 * that it is OK to send packets.
2469 	 * If there are no pending descriptors, clear the timeout. Otherwise,
2470 	 * if some descriptors have been freed, restart the timeout.
2471 	 */
2472 	if (num_avail > EM_TX_CLEANUP_THRESHOLD)
2473 		ifp->if_flags &= ~IFF_OACTIVE;
2474 
2475 	/* All clean, turn off the timer */
2476 	if (num_avail == sc->num_tx_desc)
2477 		ifp->if_timer = 0;
2478 	/* Some cleaned, reset the timer */
2479 	else if (num_avail != sc->num_tx_desc_avail)
2480 		ifp->if_timer = EM_TX_TIMEOUT;
2481 
2482 	sc->num_tx_desc_avail = num_avail;
2483 }
2484 
2485 /*********************************************************************
2486  *
2487  *  Get a buffer from system mbuf buffer pool.
2488  *
2489  **********************************************************************/
2490 int
2491 em_get_buf(struct em_softc *sc, int i)
2492 {
2493 	struct mbuf    *m;
2494 	struct em_buffer *pkt;
2495 	struct em_rx_desc *desc;
2496 	int error;
2497 
2498 	pkt = &sc->rx_buffer_area[i];
2499 	desc = &sc->rx_desc_base[i];
2500 
2501 	if (pkt->m_head != NULL) {
2502 		printf("%s: em_get_buf: slot %d already has an mbuf\n",
2503 		    sc->sc_dv.dv_xname, i);
2504 		return (ENOBUFS);
2505 	}
2506 
2507 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
2508 	if (!m) {
2509 		sc->mbuf_cluster_failed++;
2510 		return (ENOBUFS);
2511 	}
2512 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2513 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2514 		m_adj(m, ETHER_ALIGN);
2515 
2516 	error = bus_dmamap_load_mbuf(sc->rxtag, pkt->map, m, BUS_DMA_NOWAIT);
2517 	if (error) {
2518 		m_freem(m);
2519 		return (error);
2520 	}
2521 
2522 	bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
2523 	    BUS_DMASYNC_PREREAD);
2524 	pkt->m_head = m;
2525 
2526 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2527 	    sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_POSTWRITE);
2528 
2529 	bzero(desc, sizeof(*desc));
2530 	desc->buffer_addr = htole64(pkt->map->dm_segs[0].ds_addr);
2531 
2532 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2533 	    sizeof(*desc) * i, sizeof(*desc), BUS_DMASYNC_PREWRITE);
2534 
2535 	return (0);
2536 }
2537 
2538 /*********************************************************************
2539  *
2540  *  Allocate memory for rx_buffer structures. Since we use one
2541  *  rx_buffer per received packet, the maximum number of rx_buffers
2542  *  that we'll need is equal to the number of receive descriptors
2543  *  that we've allocated.
2544  *
2545  **********************************************************************/
2546 int
2547 em_allocate_receive_structures(struct em_softc *sc)
2548 {
2549 	int		i, error;
2550 	struct em_buffer *rx_buffer;
2551 
2552 	if (!(sc->rx_buffer_area = mallocarray(sc->num_rx_desc,
2553 	    sizeof(struct em_buffer), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2554 		printf("%s: Unable to allocate rx_buffer memory\n",
2555 		       sc->sc_dv.dv_xname);
2556 		return (ENOMEM);
2557 	}
2558 
2559 	sc->rxtag = sc->osdep.em_pa.pa_dmat;
2560 
2561 	rx_buffer = sc->rx_buffer_area;
2562 	for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2563 		error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
2564 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rx_buffer->map);
2565 		if (error != 0) {
2566 			printf("%s: em_allocate_receive_structures: "
2567 			    "bus_dmamap_create failed; error %u\n",
2568 			    sc->sc_dv.dv_xname, error);
2569 			goto fail;
2570 		}
2571 		rx_buffer->m_head = NULL;
2572 	}
2573 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
2574 	    sc->rxdma.dma_map->dm_mapsize,
2575 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2576 
2577 	return (0);
2578 
2579 fail:
2580 	em_free_receive_structures(sc);
2581 	return (error);
2582 }
2583 
2584 /*********************************************************************
2585  *
2586  *  Allocate and initialize receive structures.
2587  *
2588  **********************************************************************/
2589 int
2590 em_setup_receive_structures(struct em_softc *sc)
2591 {
2592 	struct ifnet *ifp = &sc->interface_data.ac_if;
2593 
2594 	memset(sc->rx_desc_base, 0,
2595 	    sizeof(struct em_rx_desc) * sc->num_rx_desc);
2596 
2597 	if (em_allocate_receive_structures(sc))
2598 		return (ENOMEM);
2599 
2600 	/* Setup our descriptor pointers */
2601 	sc->next_rx_desc_to_check = 0;
2602 	sc->last_rx_desc_filled = sc->num_rx_desc - 1;
2603 
2604 	if_rxr_init(&sc->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2605 	    sc->num_rx_desc);
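	/*
	 * For example, a 1500-byte hardmtu with 2048-byte clusters
	 * gives a low watermark of 2 * (0 + 1) = 2 descriptors, while
	 * a 9000-byte jumbo MTU would give 2 * (4 + 1) = 10.
	 */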
2606 
2607 	if (em_rxfill(sc) == 0) {
2608 		printf("%s: unable to fill any rx descriptors\n",
2609 		    sc->sc_dv.dv_xname);
2610 	}
2611 
2612 	return (0);
2613 }
2614 
2615 /*********************************************************************
2616  *
2617  *  Enable receive unit.
2618  *
2619  **********************************************************************/
2620 void
2621 em_initialize_receive_unit(struct em_softc *sc)
2622 {
2623 	u_int32_t	reg_rctl;
2624 	u_int32_t	reg_rxcsum;
2625 	u_int64_t	bus_addr;
2626 
2627 	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2628 
2629 	/* Make sure receives are disabled while setting up the descriptor ring */
2630 	E1000_WRITE_REG(&sc->hw, RCTL, 0);
2631 
2632 	/* Set the Receive Delay Timer Register */
2633 	E1000_WRITE_REG(&sc->hw, RDTR,
2634 			sc->rx_int_delay | E1000_RDT_FPDB);
2635 
2636 	if (sc->hw.mac_type >= em_82540) {
2637 		if (sc->rx_int_delay)
2638 			E1000_WRITE_REG(&sc->hw, RADV, sc->rx_abs_int_delay);
2639 
2640 		/* Set the interrupt throttling rate.  Value is calculated
2641 		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */
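		/*
		 * For example, assuming MAX_INTS_PER_SEC is 8000, the
		 * interval is 1 / 8000 s = 125 us = 125000 ns / 256 ns,
		 * i.e. an ITR value of about 488.
		 */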
2642 		E1000_WRITE_REG(&sc->hw, ITR, DEFAULT_ITR);
2643 	}
2644 
2645 	/* Setup the Base and Length of the Rx Descriptor Ring */
2646 	bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
2647 	E1000_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
2648 			sizeof(struct em_rx_desc));
2649 	E1000_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
2650 	E1000_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
2651 
2652 	/* Setup the Receive Control Register */
2653 	reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2654 	    E1000_RCTL_RDMTS_HALF |
2655 	    (sc->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2656 
2657 	if (sc->hw.tbi_compatibility_on == TRUE)
2658 		reg_rctl |= E1000_RCTL_SBP;
2659 
2660 	/*
2661 	 * The i350 has a bug where it always strips the CRC whether
2662 	 * asked to or not, so ask for stripped CRC here and
2663 	 * compensate in em_rxeof().
2664 	 */
2665 	if (sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350)
2666 		reg_rctl |= E1000_RCTL_SECRC;
2667 
2668 	switch (sc->rx_buffer_len) {
2669 	default:
2670 	case EM_RXBUFFER_2048:
2671 		reg_rctl |= E1000_RCTL_SZ_2048;
2672 		break;
2673 	case EM_RXBUFFER_4096:
2674 		reg_rctl |= E1000_RCTL_SZ_4096|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2675 		break;
2676 	case EM_RXBUFFER_8192:
2677 		reg_rctl |= E1000_RCTL_SZ_8192|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2678 		break;
2679 	case EM_RXBUFFER_16384:
2680 		reg_rctl |= E1000_RCTL_SZ_16384|E1000_RCTL_BSEX|E1000_RCTL_LPE;
2681 		break;
2682 	}
2683 
2684 	if (sc->hw.max_frame_size != ETHER_MAX_LEN)
2685 		reg_rctl |= E1000_RCTL_LPE;
2686 
2687 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
2688 	if (sc->hw.mac_type >= em_82543) {
2689 		reg_rxcsum = E1000_READ_REG(&sc->hw, RXCSUM);
2690 		reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2691 		E1000_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);
2692 	}
2693 
2694 	/*
2695 	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
2696 	 * long latencies are observed, like Lenovo X60.
2697 	 */
2698 	if (sc->hw.mac_type == em_82573)
2699 		E1000_WRITE_REG(&sc->hw, RDTR, 0x20);
2700 
2701 	if (sc->hw.mac_type == em_82575 || sc->hw.mac_type == em_82580 ||
2702 	    sc->hw.mac_type == em_i210 || sc->hw.mac_type == em_i350) {
2703 		/* 82575/6 need to enable the RX queue */
2704 		uint32_t reg;
2705 		reg = E1000_READ_REG(&sc->hw, RXDCTL);
2706 		reg |= E1000_RXDCTL_QUEUE_ENABLE;
2707 		E1000_WRITE_REG(&sc->hw, RXDCTL, reg);
2708 	}
2709 
2710 	/* Enable Receives */
2711 	E1000_WRITE_REG(&sc->hw, RCTL, reg_rctl);
2712 
2713 	/* Setup the HW Rx Head and Tail Descriptor Pointers */
2714 	E1000_WRITE_REG(&sc->hw, RDH, 0);
2715 	E1000_WRITE_REG(&sc->hw, RDT, sc->last_rx_desc_filled);
2716 }
2717 
2718 /*********************************************************************
2719  *
2720  *  Free receive related data structures.
2721  *
2722  **********************************************************************/
2723 void
2724 em_free_receive_structures(struct em_softc *sc)
2725 {
2726 	struct em_buffer   *rx_buffer;
2727 	int		i;
2728 
2729 	INIT_DEBUGOUT("free_receive_structures: begin");
2730 
2731 	if (sc->rx_buffer_area != NULL) {
2732 		rx_buffer = sc->rx_buffer_area;
2733 		for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
2734 			if (rx_buffer->m_head != NULL) {
2735 				bus_dmamap_sync(sc->rxtag, rx_buffer->map,
2736 				    0, rx_buffer->map->dm_mapsize,
2737 				    BUS_DMASYNC_POSTREAD);
2738 				bus_dmamap_unload(sc->rxtag, rx_buffer->map);
2739 				m_freem(rx_buffer->m_head);
2740 				rx_buffer->m_head = NULL;
2741 			}
2742 			bus_dmamap_destroy(sc->rxtag, rx_buffer->map);
2743 		}
2744 	}
2745 	if (sc->rx_buffer_area != NULL) {
2746 		free(sc->rx_buffer_area, M_DEVBUF, 0);
2747 		sc->rx_buffer_area = NULL;
2748 	}
2749 	if (sc->rxtag != NULL)
2750 		sc->rxtag = NULL;
2751 
2752 	if (sc->fmp != NULL) {
2753 		m_freem(sc->fmp);
2754 		sc->fmp = NULL;
2755 		sc->lmp = NULL;
2756 	}
2757 }
2758 
2759 #ifdef __STRICT_ALIGNMENT
2760 void
2761 em_realign(struct em_softc *sc, struct mbuf *m, u_int16_t *prev_len_adj)
2762 {
2763 	unsigned char tmp_align_buf[ETHER_ALIGN];
2764 	int tmp_align_buf_len = 0;
2765 
2766 	/*
2767 	 * The Ethernet payload is not 32-bit aligned when
2768 	 * Jumbo packets are enabled, so on architectures with
2769 	 * strict alignment we need to shift the entire packet
2770 	 * ETHER_ALIGN bytes. Ugh.
2771 	 */
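	/*
	 * For example, with 2048-byte clusters (MCLBYTES) and
	 * ETHER_ALIGN = 2, each fragment's payload is shifted up 2
	 * bytes so the IP header stays 32-bit aligned; the 2 bytes
	 * pushed off the end of a full cluster are saved in
	 * sc->align_buf and prepended to the next fragment.
	 */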
2772 	if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2773 		return;
2774 
2775 	if (*prev_len_adj > sc->align_buf_len)
2776 		*prev_len_adj -= sc->align_buf_len;
2777 	else
2778 		*prev_len_adj = 0;
2779 
2780 	if (m->m_len > (MCLBYTES - ETHER_ALIGN)) {
2781 		bcopy(m->m_data + (MCLBYTES - ETHER_ALIGN),
2782 		    &tmp_align_buf, ETHER_ALIGN);
2783 		tmp_align_buf_len = m->m_len -
2784 		    (MCLBYTES - ETHER_ALIGN);
2785 		m->m_len -= ETHER_ALIGN;
2786 	}
2787 
2788 	if (m->m_len) {
2789 		bcopy(m->m_data, m->m_data + ETHER_ALIGN, m->m_len);
2790 		if (!sc->align_buf_len)
2791 			m->m_data += ETHER_ALIGN;
2792 	}
2793 
2794 	if (sc->align_buf_len) {
2795 		m->m_len += sc->align_buf_len;
2796 		bcopy(&sc->align_buf, m->m_data, sc->align_buf_len);
2797 	}
2798 
2799 	if (tmp_align_buf_len)
2800 		bcopy(&tmp_align_buf, &sc->align_buf, tmp_align_buf_len);
2801 
2802 	sc->align_buf_len = tmp_align_buf_len;
2803 }
2804 #endif /* __STRICT_ALIGNMENT */
2805 
2806 int
2807 em_rxfill(struct em_softc *sc)
2808 {
2809 	u_int slots;
2810 	int post = 0;
2811 	int i;
2812 
2813 	i = sc->last_rx_desc_filled;
2814 
2815 	for (slots = if_rxr_get(&sc->rx_ring, sc->num_rx_desc);
2816 	    slots > 0; slots--) {
2817 		if (++i == sc->num_rx_desc)
2818 			i = 0;
2819 
2820 		if (em_get_buf(sc, i) != 0)
2821 			break;
2822 
2823 		sc->last_rx_desc_filled = i;
2824 		post = 1;
2825 	}
2826 
2827 	if_rxr_put(&sc->rx_ring, slots);
2828 
2829 	return (post);
2830 }
2831 
2832 /*********************************************************************
2833  *
2834  *  This routine executes in interrupt context. It replenishes
2835  *  the mbufs in the descriptor ring and passes data that has been
2836  *  DMA'ed into host memory up to the network stack.
2837  *
2838  *********************************************************************/
2839 void
2840 em_rxeof(struct em_softc *sc)
2841 {
2842 	struct ifnet	    *ifp = &sc->interface_data.ac_if;
2843 	struct mbuf	    *m;
2844 	u_int8_t	    accept_frame = 0;
2845 	u_int8_t	    eop = 0;
2846 	u_int16_t	    len, desc_len, prev_len_adj;
2847 	int		    i;
2848 
2849 	/* Pointer to the receive descriptor being examined. */
2850 	struct em_rx_desc   *desc;
2851 	struct em_buffer    *pkt;
2852 	u_int8_t	    status;
2853 
2854 	if (if_rxr_inuse(&sc->rx_ring) == 0)
2855 		return;
2856 
2857 	i = sc->next_rx_desc_to_check;
2858 
2859 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2860 	    0, sizeof(*desc) * sc->num_rx_desc,
2861 	    BUS_DMASYNC_POSTREAD);
2862 
2863 	do {
2864 		m = NULL;
2865 
2866 		desc = &sc->rx_desc_base[i];
2867 		pkt = &sc->rx_buffer_area[i];
2868 
2869 		status = desc->status;
2870 		if (!ISSET(status, E1000_RXD_STAT_DD))
2871 			break;
2872 
2873 		/* pull the mbuf off the ring */
2874 		bus_dmamap_sync(sc->rxtag, pkt->map, 0, pkt->map->dm_mapsize,
2875 		    BUS_DMASYNC_POSTREAD);
2876 		bus_dmamap_unload(sc->rxtag, pkt->map);
2877 		m = pkt->m_head;
2878 		pkt->m_head = NULL;
2879 
2880 		if (m == NULL) {
2881 			panic("em_rxeof: NULL mbuf in slot %d "
2882 			    "(nrx %d, filled %d)", i,
2883 			    if_rxr_inuse(&sc->rx_ring),
2884 			    sc->last_rx_desc_filled);
2885 		}
2886 
2887 		if_rxr_put(&sc->rx_ring, 1);
2888 
2889 		accept_frame = 1;
2890 		prev_len_adj = 0;
2891 		desc_len = letoh16(desc->length);
2892 
2893 		if (status & E1000_RXD_STAT_EOP) {
2894 			eop = 1;
2895 			if (desc_len < ETHER_CRC_LEN) {
2896 				len = 0;
2897 				prev_len_adj = ETHER_CRC_LEN - desc_len;
2898 			} else if (sc->hw.mac_type == em_i210 ||
2899 			    sc->hw.mac_type == em_i350)
2900 				len = desc_len;
2901 			else
2902 				len = desc_len - ETHER_CRC_LEN;
2903 		} else {
2904 			eop = 0;
2905 			len = desc_len;
2906 		}
2907 
2908 		if (desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2909 			u_int8_t last_byte;
2910 			u_int32_t pkt_len = desc_len;
2911 
2912 			if (sc->fmp != NULL)
2913 				pkt_len += sc->fmp->m_pkthdr.len;
2914 
2915 			last_byte = *(mtod(m, caddr_t) + desc_len - 1);
2916 			if (TBI_ACCEPT(&sc->hw, status, desc->errors,
2917 			    pkt_len, last_byte)) {
2918 #ifndef SMALL_KERNEL
2919 				em_tbi_adjust_stats(&sc->hw, &sc->stats,
2920 				    pkt_len, sc->hw.mac_addr);
2921 #endif
2922 				if (len > 0)
2923 					len--;
2924 			} else
2925 				accept_frame = 0;
2926 		}
2927 
2928 		if (accept_frame) {
2929 			/* Assign correct length to the current fragment */
2930 			m->m_len = len;
2931 
2932 			em_realign(sc, m, &prev_len_adj); /* STRICT_ALIGN */
2933 
2934 			if (sc->fmp == NULL) {
2935 				m->m_pkthdr.len = m->m_len;
2936 				sc->fmp = m;	 /* Store the first mbuf */
2937 				sc->lmp = m;
2938 			} else {
2939 				/* Chain mbuf's together */
2940 				m->m_flags &= ~M_PKTHDR;
2941 				/*
2942 				 * Adjust length of previous mbuf in chain if
2943 				 * we received less than 4 bytes in the last
2944 				 * descriptor.
2945 				 */
2946 				if (prev_len_adj > 0) {
2947 					sc->lmp->m_len -= prev_len_adj;
2948 					sc->fmp->m_pkthdr.len -= prev_len_adj;
2949 				}
2950 				sc->lmp->m_next = m;
2951 				sc->lmp = m;
2952 				sc->fmp->m_pkthdr.len += m->m_len;
2953 			}
2954 
2955 			if (eop) {
2956 				ifp->if_ipackets++;
2957 
2958 				m = sc->fmp;
2959 				m->m_pkthdr.rcvif = ifp;
2960 
2961 				em_receive_checksum(sc, desc, m);
2962 #if NVLAN > 0
2963 				if (desc->status & E1000_RXD_STAT_VP) {
2964 					m->m_pkthdr.ether_vtag =
2965 					    letoh16(desc->special);
2966 					m->m_flags |= M_VLANTAG;
2967 				}
2968 #endif
2969 #if NBPFILTER > 0
2970 				if (ifp->if_bpf) {
2971 					bpf_mtap_ether(ifp->if_bpf, m,
2972 					    BPF_DIRECTION_IN);
2973 				}
2974 #endif
2975 
2976 				ether_input_mbuf(ifp, m);
2977 
2978 				sc->fmp = NULL;
2979 				sc->lmp = NULL;
2980 			}
2981 		} else {
2982 			sc->dropped_pkts++;
2983 
2984 			if (sc->fmp != NULL) {
2985 				m_freem(sc->fmp);
2986 				sc->fmp = NULL;
2987 				sc->lmp = NULL;
2988 			}
2989 
2990 			m_freem(m);
2991 		}
2992 
2993 		/* Advance our pointers to the next descriptor. */
2994 		if (++i == sc->num_rx_desc)
2995 			i = 0;
2996 	} while (if_rxr_inuse(&sc->rx_ring) > 0);
2997 
2998 	bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map,
2999 	    0, sizeof(*desc) * sc->num_rx_desc,
3000 	    BUS_DMASYNC_PREREAD);
3001 
3002 	sc->next_rx_desc_to_check = i;
3003 }
3004 
3005 /*********************************************************************
3006  *
3007  *  Verify that the hardware indicated that the checksum is valid.
3008  *  Inform the stack about the status of the checksum so that the
3009  *  stack doesn't spend time verifying it again.
3010  *
3011  *********************************************************************/
3012 void
3013 em_receive_checksum(struct em_softc *sc, struct em_rx_desc *rx_desc,
3014     struct mbuf *mp)
3015 {
3016 	/* 82543 or newer only */
3017 	if ((sc->hw.mac_type < em_82543) ||
3018 	    /* Ignore Checksum bit is set */
3019 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3020 		mp->m_pkthdr.csum_flags = 0;
3021 		return;
3022 	}
3023 
3024 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3025 		/* Did it pass? */
3026 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3027 			/* IP Checksum Good */
3028 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
3029 
3030 		} else
3031 			mp->m_pkthdr.csum_flags = 0;
3032 	}
3033 
3034 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3035 		/* Did it pass? */
3036 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE))
3037 			mp->m_pkthdr.csum_flags |=
3038 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3039 	}
3040 }
3041 
3042 /*
3043  * This turns on the hardware offload of VLAN
3044  * tag insertion and stripping.
3045  */
3046 void
3047 em_enable_hw_vlans(struct em_softc *sc)
3048 {
3049 	uint32_t ctrl;
3050 
3051 	ctrl = E1000_READ_REG(&sc->hw, CTRL);
3052 	ctrl |= E1000_CTRL_VME;
3053 	E1000_WRITE_REG(&sc->hw, CTRL, ctrl);
3054 }
3055 
3056 void
3057 em_enable_intr(struct em_softc *sc)
3058 {
3059 	E1000_WRITE_REG(&sc->hw, IMS, (IMS_ENABLE_MASK));
3060 }
3061 
3062 void
3063 em_disable_intr(struct em_softc *sc)
3064 {
3065 	/*
3066 	 * The first version of the 82542 had an erratum where, when
3067 	 * link was forced, it would stay up even if the cable was
3068 	 * disconnected.  Sequence errors were used to detect the
3069 	 * disconnect and then the driver would unforce the link.
3070 	 * This code is in the ISR.  For this to work correctly the
3071 	 * Sequence error interrupt had to be enabled all the time.
3072 	 */
3073 
3074 	if (sc->hw.mac_type == em_82542_rev2_0)
3075 		E1000_WRITE_REG(&sc->hw, IMC, (0xffffffff & ~E1000_IMC_RXSEQ));
3076 	else
3077 		E1000_WRITE_REG(&sc->hw, IMC, 0xffffffff);
3078 }
3079 
3080 void
3081 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3082 {
3083 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3084 	pcireg_t val;
3085 
3086 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3087 	if (reg & 0x2) {
3088 		val &= 0x0000ffff;
3089 		val |= (*value << 16);
3090 	} else {
3091 		val &= 0xffff0000;
3092 		val |= *value;
3093 	}
3094 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg & ~0x3, val);
3095 }
3096 
3097 void
3098 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3099 {
3100 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3101 	pcireg_t val;
3102 
3103 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, reg & ~0x3);
3104 	if (reg & 0x2)
3105 		*value = (val >> 16) & 0xffff;
3106 	else
3107 		*value = val & 0xffff;
3108 }
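/*
 * Example of the 16-bit access scheme above: reading config offset
 * 0x0a (a 16-bit register in the dword at 0x08) reads the dword at
 * 0x08 and returns its upper 16 bits; writing offset 0x08 replaces
 * only the lower 16 bits of that dword.
 */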
3109 
3110 void
3111 em_pci_set_mwi(struct em_hw *hw)
3112 {
3113 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3114 
3115 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3116 		(hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE));
3117 }
3118 
3119 void
3120 em_pci_clear_mwi(struct em_hw *hw)
3121 {
3122 	struct pci_attach_args *pa = &((struct em_osdep *)hw->back)->em_pa;
3123 
3124 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
3125 		(hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE));
3126 }
3127 
3128 /*
3129  * We may eventually really implement this, but it's unnecessary
3130  * for now, so we just return unsupported.
3131  */
3132 int32_t
3133 em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3134 {
3135 	return (-E1000_NOT_IMPLEMENTED);
3136 }
3137 
3138 /*********************************************************************
3139 * 82544 coexistence issue workaround.
3140 *    There are two issues:
3141 *       1. Transmit hang.
3142 *    To detect this issue, the following equation can be used:
3143 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3144 *          If SUM[3:0] is between 0x1 and 0x4, we will have this issue.
3145 *
3146 *       2. DAC issue.
3147 *    To detect this issue, the following equation can be used:
3148 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3149 *          If SUM[3:0] is between 0x9 and 0xC, we will have this issue.
3150 *
3151 *
3152 *    WORKAROUND:
3153 *          Make sure the buffer never ends at an address whose low
3154 *          nibble is 0x1-0x4 (hang) or 0x9-0xC (DAC).
3155 **********************************************************************/
3156 u_int32_t
3157 em_fill_descriptors(u_int64_t address, u_int32_t length,
3158     PDESC_ARRAY desc_array)
3159 {
3160 	/* The issue is sensitive to both length and address, */
3161 	/* so check the address first... */
3162 	u_int32_t safe_terminator;
3163 	if (length <= 4) {
3164 		desc_array->descriptor[0].address = address;
3165 		desc_array->descriptor[0].length = length;
3166 		desc_array->elements = 1;
3167 		return (desc_array->elements);
3168 	}
3169 	safe_terminator = (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);
3170 	/* If it does not fall between 0x1-0x4 or 0x9-0xC, no split is needed. */
3171 	if (safe_terminator == 0 ||
3172 	    (safe_terminator > 4 &&
3173 	    safe_terminator < 9) ||
3174 	    (safe_terminator > 0xC &&
3175 	    safe_terminator <= 0xF)) {
3176 		desc_array->descriptor[0].address = address;
3177 		desc_array->descriptor[0].length = length;
3178 		desc_array->elements = 1;
3179 		return (desc_array->elements);
3180 	}
3181 
3182 	desc_array->descriptor[0].address = address;
3183 	desc_array->descriptor[0].length = length - 4;
3184 	desc_array->descriptor[1].address = address + (length - 4);
3185 	desc_array->descriptor[1].length = 4;
3186 	desc_array->elements = 2;
3187 	return (desc_array->elements);
3188 }
3189 
3190 #ifndef SMALL_KERNEL
3191 /**********************************************************************
3192  *
3193  *  Update the board statistics counters.
3194  *
3195  **********************************************************************/
3196 void
3197 em_update_stats_counters(struct em_softc *sc)
3198 {
3199 	struct ifnet   *ifp = &sc->interface_data.ac_if;
3200 
3201 	sc->stats.crcerrs += E1000_READ_REG(&sc->hw, CRCERRS);
3202 	sc->stats.mpc += E1000_READ_REG(&sc->hw, MPC);
3203 	sc->stats.ecol += E1000_READ_REG(&sc->hw, ECOL);
3204 
3205 	sc->stats.latecol += E1000_READ_REG(&sc->hw, LATECOL);
3206 	sc->stats.colc += E1000_READ_REG(&sc->hw, COLC);
3207 
3208 	sc->stats.ruc += E1000_READ_REG(&sc->hw, RUC);
3209 	sc->stats.roc += E1000_READ_REG(&sc->hw, ROC);
3210 
3211 	if (sc->hw.mac_type >= em_82543) {
3212 		sc->stats.algnerrc +=
3213 		    E1000_READ_REG(&sc->hw, ALGNERRC);
3214 		sc->stats.rxerrc +=
3215 		    E1000_READ_REG(&sc->hw, RXERRC);
3216 		sc->stats.cexterr +=
3217 		    E1000_READ_REG(&sc->hw, CEXTERR);
3218 	}
3219 
3220 #ifdef EM_DEBUG
3221 	if (sc->hw.media_type == em_media_type_copper ||
3222 	    (E1000_READ_REG(&sc->hw, STATUS) & E1000_STATUS_LU)) {
3223 		sc->stats.symerrs += E1000_READ_REG(&sc->hw, SYMERRS);
3224 		sc->stats.sec += E1000_READ_REG(&sc->hw, SEC);
3225 	}
3226 	sc->stats.scc += E1000_READ_REG(&sc->hw, SCC);
3227 
3228 	sc->stats.mcc += E1000_READ_REG(&sc->hw, MCC);
3229 	sc->stats.dc += E1000_READ_REG(&sc->hw, DC);
3230 	sc->stats.rlec += E1000_READ_REG(&sc->hw, RLEC);
3231 	sc->stats.xonrxc += E1000_READ_REG(&sc->hw, XONRXC);
3232 	sc->stats.xontxc += E1000_READ_REG(&sc->hw, XONTXC);
3233 	sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, XOFFRXC);
3234 	sc->stats.xofftxc += E1000_READ_REG(&sc->hw, XOFFTXC);
3235 	sc->stats.fcruc += E1000_READ_REG(&sc->hw, FCRUC);
3236 	sc->stats.prc64 += E1000_READ_REG(&sc->hw, PRC64);
3237 	sc->stats.prc127 += E1000_READ_REG(&sc->hw, PRC127);
3238 	sc->stats.prc255 += E1000_READ_REG(&sc->hw, PRC255);
3239 	sc->stats.prc511 += E1000_READ_REG(&sc->hw, PRC511);
3240 	sc->stats.prc1023 += E1000_READ_REG(&sc->hw, PRC1023);
3241 	sc->stats.prc1522 += E1000_READ_REG(&sc->hw, PRC1522);
3242 	sc->stats.gprc += E1000_READ_REG(&sc->hw, GPRC);
3243 	sc->stats.bprc += E1000_READ_REG(&sc->hw, BPRC);
3244 	sc->stats.mprc += E1000_READ_REG(&sc->hw, MPRC);
3245 	sc->stats.gptc += E1000_READ_REG(&sc->hw, GPTC);
3246 
3247 	/* For the 64-bit byte counters the low dword must be read first. */
3248 	/* Both registers clear on the read of the high dword. */
3249 
3250 	sc->stats.gorcl += E1000_READ_REG(&sc->hw, GORCL);
3251 	sc->stats.gorch += E1000_READ_REG(&sc->hw, GORCH);
3252 	sc->stats.gotcl += E1000_READ_REG(&sc->hw, GOTCL);
3253 	sc->stats.gotch += E1000_READ_REG(&sc->hw, GOTCH);
3254 
3255 	sc->stats.rnbc += E1000_READ_REG(&sc->hw, RNBC);
3256 	sc->stats.rfc += E1000_READ_REG(&sc->hw, RFC);
3257 	sc->stats.rjc += E1000_READ_REG(&sc->hw, RJC);
3258 
3259 	sc->stats.torl += E1000_READ_REG(&sc->hw, TORL);
3260 	sc->stats.torh += E1000_READ_REG(&sc->hw, TORH);
3261 	sc->stats.totl += E1000_READ_REG(&sc->hw, TOTL);
3262 	sc->stats.toth += E1000_READ_REG(&sc->hw, TOTH);
3263 
3264 	sc->stats.tpr += E1000_READ_REG(&sc->hw, TPR);
3265 	sc->stats.tpt += E1000_READ_REG(&sc->hw, TPT);
3266 	sc->stats.ptc64 += E1000_READ_REG(&sc->hw, PTC64);
3267 	sc->stats.ptc127 += E1000_READ_REG(&sc->hw, PTC127);
3268 	sc->stats.ptc255 += E1000_READ_REG(&sc->hw, PTC255);
3269 	sc->stats.ptc511 += E1000_READ_REG(&sc->hw, PTC511);
3270 	sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, PTC1023);
3271 	sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, PTC1522);
3272 	sc->stats.mptc += E1000_READ_REG(&sc->hw, MPTC);
3273 	sc->stats.bptc += E1000_READ_REG(&sc->hw, BPTC);
3274 
3275 	if (sc->hw.mac_type >= em_82543) {
3276 		sc->stats.tncrs +=
3277 		    E1000_READ_REG(&sc->hw, TNCRS);
3278 		sc->stats.tsctc +=
3279 		    E1000_READ_REG(&sc->hw, TSCTC);
3280 		sc->stats.tsctfc +=
3281 		    E1000_READ_REG(&sc->hw, TSCTFC);
3282 	}
3283 #endif
3284 
3285 	/* Fill out the OS statistics structure */
3286 	ifp->if_collisions = sc->stats.colc;
3287 
3288 	/* Rx Errors */
3289 	ifp->if_ierrors =
3290 	    sc->dropped_pkts +
3291 	    sc->stats.rxerrc +
3292 	    sc->stats.crcerrs +
3293 	    sc->stats.algnerrc +
3294 	    sc->stats.ruc + sc->stats.roc +
3295 	    sc->stats.mpc + sc->stats.cexterr +
3296 	    sc->rx_overruns;
3297 
3298 	/* Tx Errors */
3299 	ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol +
3300 	    sc->watchdog_events;
3301 }
3302 
3303 #ifdef EM_DEBUG
3304 /**********************************************************************
3305  *
3306  *  This routine is called only when IFF_DEBUG is enabled.
3307  *  This routine provides a way to take a look at important statistics
3308  *  maintained by the driver and hardware.
3309  *
3310  **********************************************************************/
3311 void
3312 em_print_hw_stats(struct em_softc *sc)
3313 {
3314 	const char * const unit = sc->sc_dv.dv_xname;
3315 
3316 	printf("%s: Excessive collisions = %lld\n", unit,
3317 		(long long)sc->stats.ecol);
3318 	printf("%s: Symbol errors = %lld\n", unit,
3319 		(long long)sc->stats.symerrs);
3320 	printf("%s: Sequence errors = %lld\n", unit,
3321 		(long long)sc->stats.sec);
3322 	printf("%s: Defer count = %lld\n", unit,
3323 		(long long)sc->stats.dc);
3324 
3325 	printf("%s: Missed Packets = %lld\n", unit,
3326 		(long long)sc->stats.mpc);
3327 	printf("%s: Receive No Buffers = %lld\n", unit,
3328 		(long long)sc->stats.rnbc);
3329 	/* RLEC is inaccurate on some hardware, so calculate our own */
3330 	printf("%s: Receive Length Errors = %lld\n", unit,
3331 		((long long)sc->stats.roc +
3332 		(long long)sc->stats.ruc));
3333 	printf("%s: Receive errors = %lld\n", unit,
3334 		(long long)sc->stats.rxerrc);
3335 	printf("%s: Crc errors = %lld\n", unit,
3336 		(long long)sc->stats.crcerrs);
3337 	printf("%s: Alignment errors = %lld\n", unit,
3338 		(long long)sc->stats.algnerrc);
3339 	printf("%s: Carrier extension errors = %lld\n", unit,
3340 		(long long)sc->stats.cexterr);
3341 
3342 	printf("%s: RX overruns = %ld\n", unit,
3343 		sc->rx_overruns);
3344 	printf("%s: watchdog timeouts = %ld\n", unit,
3345 		sc->watchdog_events);
3346 
3347 	printf("%s: XON Rcvd = %lld\n", unit,
3348 		(long long)sc->stats.xonrxc);
3349 	printf("%s: XON Xmtd = %lld\n", unit,
3350 		(long long)sc->stats.xontxc);
3351 	printf("%s: XOFF Rcvd = %lld\n", unit,
3352 		(long long)sc->stats.xoffrxc);
3353 	printf("%s: XOFF Xmtd = %lld\n", unit,
3354 		(long long)sc->stats.xofftxc);
3355 
3356 	printf("%s: Good Packets Rcvd = %lld\n", unit,
3357 		(long long)sc->stats.gprc);
3358 	printf("%s: Good Packets Xmtd = %lld\n", unit,
3359 		(long long)sc->stats.gptc);
3360 }
3361 #endif
3362 #endif /* !SMALL_KERNEL */
3363