xref: /onnv-gate/usr/src/uts/common/io/ixgbe/ixgbe_main.c (revision 6621:c38d03b60f12)
1*6621Sbt150084 /*
2*6621Sbt150084  * CDDL HEADER START
3*6621Sbt150084  *
4*6621Sbt150084  * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
5*6621Sbt150084  * The contents of this file are subject to the terms of the
6*6621Sbt150084  * Common Development and Distribution License (the "License").
7*6621Sbt150084  * You may not use this file except in compliance with the License.
8*6621Sbt150084  *
9*6621Sbt150084  * You can obtain a copy of the license at:
10*6621Sbt150084  *      http://www.opensolaris.org/os/licensing.
11*6621Sbt150084  * See the License for the specific language governing permissions
12*6621Sbt150084  * and limitations under the License.
13*6621Sbt150084  *
14*6621Sbt150084  * When using or redistributing this file, you may do so under the
15*6621Sbt150084  * License only. No other modification of this header is permitted.
16*6621Sbt150084  *
17*6621Sbt150084  * If applicable, add the following below this CDDL HEADER, with the
18*6621Sbt150084  * fields enclosed by brackets "[]" replaced with your own identifying
19*6621Sbt150084  * information: Portions Copyright [yyyy] [name of copyright owner]
20*6621Sbt150084  *
21*6621Sbt150084  * CDDL HEADER END
22*6621Sbt150084  */
23*6621Sbt150084 
24*6621Sbt150084 /*
25*6621Sbt150084  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
26*6621Sbt150084  * Use is subject to license terms of the CDDL.
27*6621Sbt150084  */
28*6621Sbt150084 
29*6621Sbt150084 #pragma ident	"%Z%%M%	%I%	%E% SMI"
30*6621Sbt150084 
31*6621Sbt150084 #include "ixgbe_sw.h"
32*6621Sbt150084 
33*6621Sbt150084 static char ident[] = "Intel 10Gb Ethernet 1.0.0";
34*6621Sbt150084 
35*6621Sbt150084 /*
 * Local function prototypes
37*6621Sbt150084  */
38*6621Sbt150084 static int ixgbe_register_mac(ixgbe_t *);
39*6621Sbt150084 static int ixgbe_identify_hardware(ixgbe_t *);
40*6621Sbt150084 static int ixgbe_regs_map(ixgbe_t *);
41*6621Sbt150084 static void ixgbe_init_properties(ixgbe_t *);
42*6621Sbt150084 static int ixgbe_init_driver_settings(ixgbe_t *);
43*6621Sbt150084 static void ixgbe_init_locks(ixgbe_t *);
44*6621Sbt150084 static void ixgbe_destroy_locks(ixgbe_t *);
45*6621Sbt150084 static int ixgbe_init(ixgbe_t *);
46*6621Sbt150084 static int ixgbe_chip_start(ixgbe_t *);
47*6621Sbt150084 static void ixgbe_chip_stop(ixgbe_t *);
48*6621Sbt150084 static int ixgbe_reset(ixgbe_t *);
49*6621Sbt150084 static void ixgbe_tx_clean(ixgbe_t *);
50*6621Sbt150084 static boolean_t ixgbe_tx_drain(ixgbe_t *);
51*6621Sbt150084 static boolean_t ixgbe_rx_drain(ixgbe_t *);
52*6621Sbt150084 static int ixgbe_alloc_rings(ixgbe_t *);
53*6621Sbt150084 static int ixgbe_init_rings(ixgbe_t *);
54*6621Sbt150084 static void ixgbe_free_rings(ixgbe_t *);
55*6621Sbt150084 static void ixgbe_fini_rings(ixgbe_t *);
56*6621Sbt150084 static void ixgbe_setup_rings(ixgbe_t *);
57*6621Sbt150084 static void ixgbe_setup_rx(ixgbe_t *);
58*6621Sbt150084 static void ixgbe_setup_tx(ixgbe_t *);
59*6621Sbt150084 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
60*6621Sbt150084 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
61*6621Sbt150084 static void ixgbe_setup_rss(ixgbe_t *);
62*6621Sbt150084 static void ixgbe_init_unicst(ixgbe_t *);
63*6621Sbt150084 static void ixgbe_setup_multicst(ixgbe_t *);
64*6621Sbt150084 static void ixgbe_get_hw_state(ixgbe_t *);
65*6621Sbt150084 static void ixgbe_get_conf(ixgbe_t *);
66*6621Sbt150084 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
67*6621Sbt150084 static boolean_t ixgbe_driver_link_check(ixgbe_t *);
68*6621Sbt150084 static void ixgbe_local_timer(void *);
69*6621Sbt150084 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
70*6621Sbt150084 static void ixgbe_start_watchdog_timer(ixgbe_t *);
71*6621Sbt150084 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
72*6621Sbt150084 static void ixgbe_stop_watchdog_timer(ixgbe_t *);
73*6621Sbt150084 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
74*6621Sbt150084 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
75*6621Sbt150084 static boolean_t is_valid_mac_addr(uint8_t *);
76*6621Sbt150084 static boolean_t ixgbe_stall_check(ixgbe_t *);
77*6621Sbt150084 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
78*6621Sbt150084 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
79*6621Sbt150084 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
80*6621Sbt150084 static int ixgbe_alloc_intrs(ixgbe_t *);
81*6621Sbt150084 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
82*6621Sbt150084 static int ixgbe_add_intr_handlers(ixgbe_t *);
83*6621Sbt150084 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
84*6621Sbt150084 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
85*6621Sbt150084 static void ixgbe_set_ivar(ixgbe_t *, uint16_t, uint8_t);
86*6621Sbt150084 static int ixgbe_map_rings_to_vectors(ixgbe_t *);
87*6621Sbt150084 static void ixgbe_setup_adapter_vector(ixgbe_t *);
88*6621Sbt150084 static void ixgbe_rem_intr_handlers(ixgbe_t *);
89*6621Sbt150084 static void ixgbe_rem_intrs(ixgbe_t *);
90*6621Sbt150084 static int ixgbe_enable_intrs(ixgbe_t *);
91*6621Sbt150084 static int ixgbe_disable_intrs(ixgbe_t *);
92*6621Sbt150084 static uint_t ixgbe_intr_legacy(void *, void *);
93*6621Sbt150084 static uint_t ixgbe_intr_msi(void *, void *);
94*6621Sbt150084 static uint_t ixgbe_intr_rx(void *, void *);
95*6621Sbt150084 static uint_t ixgbe_intr_tx_other(void *, void *);
96*6621Sbt150084 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
97*6621Sbt150084 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
98*6621Sbt150084 static void ixgbe_intr_other_work(ixgbe_t *);
99*6621Sbt150084 static void ixgbe_get_driver_control(struct ixgbe_hw *);
100*6621Sbt150084 static void ixgbe_release_driver_control(struct ixgbe_hw *);
101*6621Sbt150084 
102*6621Sbt150084 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
103*6621Sbt150084 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
104*6621Sbt150084 static int ixgbe_resume(dev_info_t *);
105*6621Sbt150084 static int ixgbe_suspend(dev_info_t *);
106*6621Sbt150084 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
107*6621Sbt150084 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
108*6621Sbt150084 
109*6621Sbt150084 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
110*6621Sbt150084     const void *impl_data);
111*6621Sbt150084 static void ixgbe_fm_init(ixgbe_t *);
112*6621Sbt150084 static void ixgbe_fm_fini(ixgbe_t *);
113*6621Sbt150084 
/*
 * Character/block device entry points.  Every entry is a stub
 * (nulldev/nodev): device access is expected to go through the MAC layer
 * registered in _init()/ixgbe_register_mac() rather than these cb entries.
 */
static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
134*6621Sbt150084 
/*
 * Device operations.  attach/detach are the only driver-supplied entry
 * points; mac_init_ops() in _init() presumably fills in the remaining
 * GLDv3 plumbing -- confirm against mac_init_ops(9F).
 */
static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power		/* devo_power */
};
148*6621Sbt150084 
static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ident,			/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};
154*6621Sbt150084 
/* Single-driver linkage consumed by mod_install()/mod_remove()/mod_info(). */
static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};
158*6621Sbt150084 
159*6621Sbt150084 /*
160*6621Sbt150084  * Access attributes for register mapping
161*6621Sbt150084  */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* device registers are little-endian */
	DDI_STRICTORDER_ACC,	/* no reordering/merging of register access */
	DDI_FLAGERR_ACC		/* FMA flagged-error access protection */
};
168*6621Sbt150084 
169*6621Sbt150084 /*
170*6621Sbt150084  * Loopback property
171*6621Sbt150084  */
/* Default loopback property: no loopback. */
static lb_property_t lb_normal = {
	normal,	"normal", IXGBE_LB_NONE
};
175*6621Sbt150084 
/* Internal MAC loopback; configured by ixgbe_set_internal_mac_loopback(). */
static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};
179*6621Sbt150084 
180*6621Sbt150084 #define	IXGBE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
181*6621Sbt150084 
/*
 * GLDv3 callback table handed to mac_register().  The flags advertise
 * only the optional ioctl and getcapab entry points; the NULL slot is an
 * unused optional callback (verify position against mac_callbacks_t).
 */
static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,		/* get statistics */
	ixgbe_m_start,		/* start the device */
	ixgbe_m_stop,		/* stop the device */
	ixgbe_m_promisc,	/* set/clear promiscuous mode */
	ixgbe_m_multicst,	/* multicast address add/remove */
	ixgbe_m_unicst,		/* set unicast address */
	ixgbe_m_tx,		/* transmit a chain of packets */
	NULL,
	ixgbe_m_ioctl,		/* driver ioctl (loopback etc.) */
	ixgbe_m_getcapab	/* capability exchange */
};
195*6621Sbt150084 
196*6621Sbt150084 /*
197*6621Sbt150084  * Module Initialization Functions.
198*6621Sbt150084  */
199*6621Sbt150084 
200*6621Sbt150084 int
201*6621Sbt150084 _init(void)
202*6621Sbt150084 {
203*6621Sbt150084 	int status;
204*6621Sbt150084 
205*6621Sbt150084 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
206*6621Sbt150084 
207*6621Sbt150084 	status = mod_install(&ixgbe_modlinkage);
208*6621Sbt150084 
209*6621Sbt150084 	if (status != DDI_SUCCESS) {
210*6621Sbt150084 		mac_fini_ops(&ixgbe_dev_ops);
211*6621Sbt150084 	}
212*6621Sbt150084 
213*6621Sbt150084 	return (status);
214*6621Sbt150084 }
215*6621Sbt150084 
216*6621Sbt150084 int
217*6621Sbt150084 _fini(void)
218*6621Sbt150084 {
219*6621Sbt150084 	int status;
220*6621Sbt150084 
221*6621Sbt150084 	status = mod_remove(&ixgbe_modlinkage);
222*6621Sbt150084 
223*6621Sbt150084 	if (status == DDI_SUCCESS) {
224*6621Sbt150084 		mac_fini_ops(&ixgbe_dev_ops);
225*6621Sbt150084 	}
226*6621Sbt150084 
227*6621Sbt150084 	return (status);
228*6621Sbt150084 }
229*6621Sbt150084 
230*6621Sbt150084 int
231*6621Sbt150084 _info(struct modinfo *modinfop)
232*6621Sbt150084 {
233*6621Sbt150084 	int status;
234*6621Sbt150084 
235*6621Sbt150084 	status = mod_info(&ixgbe_modlinkage, modinfop);
236*6621Sbt150084 
237*6621Sbt150084 	return (status);
238*6621Sbt150084 }
239*6621Sbt150084 
240*6621Sbt150084 /*
241*6621Sbt150084  * ixgbe_attach - Driver attach.
242*6621Sbt150084  *
243*6621Sbt150084  * This function is the device specific initialization entry
244*6621Sbt150084  * point. This entry point is required and must be written.
245*6621Sbt150084  * The DDI_ATTACH command must be provided in the attach entry
246*6621Sbt150084  * point. When attach() is called with cmd set to DDI_ATTACH,
247*6621Sbt150084  * all normal kernel services (such as kmem_alloc(9F)) are
248*6621Sbt150084  * available for use by the driver.
249*6621Sbt150084  *
250*6621Sbt150084  * The attach() function will be called once for each instance
251*6621Sbt150084  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
252*6621Sbt150084  * Until attach() succeeds, the only driver entry points which
253*6621Sbt150084  * may be called are open(9E) and getinfo(9E).
254*6621Sbt150084  */
255*6621Sbt150084 static int
256*6621Sbt150084 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
257*6621Sbt150084 {
258*6621Sbt150084 	ixgbe_t *ixgbe;
259*6621Sbt150084 	struct ixgbe_osdep *osdep;
260*6621Sbt150084 	struct ixgbe_hw *hw;
261*6621Sbt150084 	int instance;
262*6621Sbt150084 
263*6621Sbt150084 	/*
264*6621Sbt150084 	 * Check the command and perform corresponding operations
265*6621Sbt150084 	 */
266*6621Sbt150084 	switch (cmd) {
267*6621Sbt150084 	default:
268*6621Sbt150084 		return (DDI_FAILURE);
269*6621Sbt150084 
270*6621Sbt150084 	case DDI_RESUME:
271*6621Sbt150084 		return (ixgbe_resume(devinfo));
272*6621Sbt150084 
273*6621Sbt150084 	case DDI_ATTACH:
274*6621Sbt150084 		break;
275*6621Sbt150084 	}
276*6621Sbt150084 
277*6621Sbt150084 	/* Get the device instance */
278*6621Sbt150084 	instance = ddi_get_instance(devinfo);
279*6621Sbt150084 
280*6621Sbt150084 	/* Allocate memory for the instance data structure */
281*6621Sbt150084 	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
282*6621Sbt150084 
283*6621Sbt150084 	ixgbe->dip = devinfo;
284*6621Sbt150084 	ixgbe->instance = instance;
285*6621Sbt150084 
286*6621Sbt150084 	hw = &ixgbe->hw;
287*6621Sbt150084 	osdep = &ixgbe->osdep;
288*6621Sbt150084 	hw->back = osdep;
289*6621Sbt150084 	osdep->ixgbe = ixgbe;
290*6621Sbt150084 
291*6621Sbt150084 	/* Attach the instance pointer to the dev_info data structure */
292*6621Sbt150084 	ddi_set_driver_private(devinfo, ixgbe);
293*6621Sbt150084 
294*6621Sbt150084 	/*
295*6621Sbt150084 	 * Initialize for fma support
296*6621Sbt150084 	 */
297*6621Sbt150084 	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, "PROP_FM_CAPABLE",
298*6621Sbt150084 	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
299*6621Sbt150084 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
300*6621Sbt150084 	ixgbe_fm_init(ixgbe);
301*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
302*6621Sbt150084 
303*6621Sbt150084 	/*
304*6621Sbt150084 	 * Map PCI config space registers
305*6621Sbt150084 	 */
306*6621Sbt150084 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
307*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to map PCI configurations");
308*6621Sbt150084 		goto attach_fail;
309*6621Sbt150084 	}
310*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
311*6621Sbt150084 
312*6621Sbt150084 	/*
313*6621Sbt150084 	 * Identify the chipset family
314*6621Sbt150084 	 */
315*6621Sbt150084 	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
316*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to identify hardware");
317*6621Sbt150084 		goto attach_fail;
318*6621Sbt150084 	}
319*6621Sbt150084 
320*6621Sbt150084 	/*
321*6621Sbt150084 	 * Map device registers
322*6621Sbt150084 	 */
323*6621Sbt150084 	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
324*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to map device registers");
325*6621Sbt150084 		goto attach_fail;
326*6621Sbt150084 	}
327*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
328*6621Sbt150084 
329*6621Sbt150084 	/*
330*6621Sbt150084 	 * Initialize driver parameters
331*6621Sbt150084 	 */
332*6621Sbt150084 	ixgbe_init_properties(ixgbe);
333*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
334*6621Sbt150084 
335*6621Sbt150084 	/*
336*6621Sbt150084 	 * Allocate interrupts
337*6621Sbt150084 	 */
338*6621Sbt150084 	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
339*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to allocate interrupts");
340*6621Sbt150084 		goto attach_fail;
341*6621Sbt150084 	}
342*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
343*6621Sbt150084 
344*6621Sbt150084 	/*
345*6621Sbt150084 	 * Allocate rx/tx rings based on the ring numbers.
346*6621Sbt150084 	 * The actual numbers of rx/tx rings are decided by the number of
347*6621Sbt150084 	 * allocated interrupt vectors, so we should allocate the rings after
348*6621Sbt150084 	 * interrupts are allocated.
349*6621Sbt150084 	 */
350*6621Sbt150084 	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
351*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
352*6621Sbt150084 		goto attach_fail;
353*6621Sbt150084 	}
354*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
355*6621Sbt150084 
356*6621Sbt150084 	/*
357*6621Sbt150084 	 * Map rings to interrupt vectors
358*6621Sbt150084 	 */
359*6621Sbt150084 	if (ixgbe_map_rings_to_vectors(ixgbe) != IXGBE_SUCCESS) {
360*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to map rings to vectors");
361*6621Sbt150084 		goto attach_fail;
362*6621Sbt150084 	}
363*6621Sbt150084 
364*6621Sbt150084 	/*
365*6621Sbt150084 	 * Add interrupt handlers
366*6621Sbt150084 	 */
367*6621Sbt150084 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
368*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
369*6621Sbt150084 		goto attach_fail;
370*6621Sbt150084 	}
371*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
372*6621Sbt150084 
373*6621Sbt150084 	/*
374*6621Sbt150084 	 * Initialize driver parameters
375*6621Sbt150084 	 */
376*6621Sbt150084 	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
377*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to initialize driver settings");
378*6621Sbt150084 		goto attach_fail;
379*6621Sbt150084 	}
380*6621Sbt150084 
381*6621Sbt150084 	/*
382*6621Sbt150084 	 * Initialize mutexes for this device.
383*6621Sbt150084 	 * Do this before enabling the interrupt handler and
384*6621Sbt150084 	 * register the softint to avoid the condition where
385*6621Sbt150084 	 * interrupt handler can try using uninitialized mutex.
386*6621Sbt150084 	 */
387*6621Sbt150084 	ixgbe_init_locks(ixgbe);
388*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
389*6621Sbt150084 
390*6621Sbt150084 	/*
391*6621Sbt150084 	 * Initialize chipset hardware
392*6621Sbt150084 	 */
393*6621Sbt150084 	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
394*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to initialize adapter");
395*6621Sbt150084 		goto attach_fail;
396*6621Sbt150084 	}
397*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
398*6621Sbt150084 
399*6621Sbt150084 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
400*6621Sbt150084 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
401*6621Sbt150084 		goto attach_fail;
402*6621Sbt150084 	}
403*6621Sbt150084 
404*6621Sbt150084 	/*
405*6621Sbt150084 	 * Initialize DMA and hardware settings for rx/tx rings
406*6621Sbt150084 	 */
407*6621Sbt150084 	if (ixgbe_init_rings(ixgbe) != IXGBE_SUCCESS) {
408*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to initialize rings");
409*6621Sbt150084 		goto attach_fail;
410*6621Sbt150084 	}
411*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT_RINGS;
412*6621Sbt150084 
413*6621Sbt150084 	/*
414*6621Sbt150084 	 * Initialize statistics
415*6621Sbt150084 	 */
416*6621Sbt150084 	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
417*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to initialize statistics");
418*6621Sbt150084 		goto attach_fail;
419*6621Sbt150084 	}
420*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
421*6621Sbt150084 
422*6621Sbt150084 	/*
423*6621Sbt150084 	 * Initialize NDD parameters
424*6621Sbt150084 	 */
425*6621Sbt150084 	if (ixgbe_nd_init(ixgbe) != IXGBE_SUCCESS) {
426*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to initialize ndd");
427*6621Sbt150084 		goto attach_fail;
428*6621Sbt150084 	}
429*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_NDD;
430*6621Sbt150084 
431*6621Sbt150084 	/*
432*6621Sbt150084 	 * Register the driver to the MAC
433*6621Sbt150084 	 */
434*6621Sbt150084 	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
435*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to register MAC");
436*6621Sbt150084 		goto attach_fail;
437*6621Sbt150084 	}
438*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
439*6621Sbt150084 
440*6621Sbt150084 	/*
441*6621Sbt150084 	 * Now that mutex locks are initialized, and the chip is also
442*6621Sbt150084 	 * initialized, enable interrupts.
443*6621Sbt150084 	 */
444*6621Sbt150084 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
445*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
446*6621Sbt150084 		goto attach_fail;
447*6621Sbt150084 	}
448*6621Sbt150084 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
449*6621Sbt150084 
450*6621Sbt150084 	ixgbe->ixgbe_state |= IXGBE_INITIALIZED;
451*6621Sbt150084 
452*6621Sbt150084 	return (DDI_SUCCESS);
453*6621Sbt150084 
454*6621Sbt150084 attach_fail:
455*6621Sbt150084 	ixgbe_unconfigure(devinfo, ixgbe);
456*6621Sbt150084 	return (DDI_FAILURE);
457*6621Sbt150084 }
458*6621Sbt150084 
459*6621Sbt150084 /*
460*6621Sbt150084  * ixgbe_detach - Driver detach.
461*6621Sbt150084  *
462*6621Sbt150084  * The detach() function is the complement of the attach routine.
463*6621Sbt150084  * If cmd is set to DDI_DETACH, detach() is used to remove  the
464*6621Sbt150084  * state  associated  with  a  given  instance of a device node
465*6621Sbt150084  * prior to the removal of that instance from the system.
466*6621Sbt150084  *
467*6621Sbt150084  * The detach() function will be called once for each  instance
468*6621Sbt150084  * of the device for which there has been a successful attach()
469*6621Sbt150084  * once there are no longer  any  opens  on  the  device.
470*6621Sbt150084  *
471*6621Sbt150084  * Interrupts routine are disabled, All memory allocated by this
472*6621Sbt150084  * driver are freed.
473*6621Sbt150084  */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}


	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * Unregister MAC. If failed, we have to fail the detach
	 *
	 * NOTE(review): once mac_unregister() succeeds the MAC handle is
	 * gone; if a later step (ixgbe_rx_drain() below) fails, detach
	 * returns DDI_FAILURE with the MAC already unregistered -- confirm
	 * the framework tolerates a retried detach in that state.
	 */
	if (mac_unregister(ixgbe->mac_hdl) != 0) {
		ixgbe_error(ixgbe, "Failed to unregister MAC");
		return (DDI_FAILURE);
	}
	/* Clear the bit so ixgbe_unconfigure() won't unregister twice */
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 */
	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
		ixgbe_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	} else
		mutex_exit(&ixgbe->gen_lock);

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}
540*6621Sbt150084 
/*
 * ixgbe_unconfigure - Tear down everything ixgbe_attach() set up.
 *
 * Each teardown step is gated on the matching ATTACH_PROGRESS_* bit, so
 * this routine is safe to call from any point of a partially-completed
 * attach as well as from a full detach.  It always frees the soft state
 * and clears the dev_info private pointer at the end.
 */
static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free ndd parameters
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_NDD) {
		ixgbe_nd_cleanup(ixgbe);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Release the DMA resources of rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT_RINGS) {
		ixgbe_fini_rings(ixgbe);
	}

	/*
	 * Stop the chipset (under gen_lock, as the chip-access routines
	 * expect)
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks (only after the chip is stopped and interrupts removed)
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}
653*6621Sbt150084 
654*6621Sbt150084 /*
655*6621Sbt150084  * ixgbe_register_mac - Register the driver and its function pointers with
656*6621Sbt150084  * the GLD interface.
657*6621Sbt150084  */
658*6621Sbt150084 static int
659*6621Sbt150084 ixgbe_register_mac(ixgbe_t *ixgbe)
660*6621Sbt150084 {
661*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
662*6621Sbt150084 	mac_register_t *mac;
663*6621Sbt150084 	int status;
664*6621Sbt150084 
665*6621Sbt150084 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
666*6621Sbt150084 		return (IXGBE_FAILURE);
667*6621Sbt150084 
668*6621Sbt150084 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
669*6621Sbt150084 	mac->m_driver = ixgbe;
670*6621Sbt150084 	mac->m_dip = ixgbe->dip;
671*6621Sbt150084 	mac->m_src_addr = hw->mac.addr;
672*6621Sbt150084 	mac->m_callbacks = &ixgbe_m_callbacks;
673*6621Sbt150084 	mac->m_min_sdu = 0;
674*6621Sbt150084 	mac->m_max_sdu = ixgbe->default_mtu;
675*6621Sbt150084 	mac->m_margin = VLAN_TAGSZ;
676*6621Sbt150084 
677*6621Sbt150084 	status = mac_register(mac, &ixgbe->mac_hdl);
678*6621Sbt150084 
679*6621Sbt150084 	mac_free(mac);
680*6621Sbt150084 
681*6621Sbt150084 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
682*6621Sbt150084 }
683*6621Sbt150084 
684*6621Sbt150084 /*
685*6621Sbt150084  * ixgbe_identify_hardware - Identify the type of the chipset.
686*6621Sbt150084  */
687*6621Sbt150084 static int
688*6621Sbt150084 ixgbe_identify_hardware(ixgbe_t *ixgbe)
689*6621Sbt150084 {
690*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
691*6621Sbt150084 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
692*6621Sbt150084 
693*6621Sbt150084 	/*
694*6621Sbt150084 	 * Get the device id
695*6621Sbt150084 	 */
696*6621Sbt150084 	hw->vendor_id =
697*6621Sbt150084 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
698*6621Sbt150084 	hw->device_id =
699*6621Sbt150084 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
700*6621Sbt150084 	hw->revision_id =
701*6621Sbt150084 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
702*6621Sbt150084 	hw->subsystem_device_id =
703*6621Sbt150084 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
704*6621Sbt150084 	hw->subsystem_vendor_id =
705*6621Sbt150084 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
706*6621Sbt150084 
707*6621Sbt150084 	return (IXGBE_SUCCESS);
708*6621Sbt150084 }
709*6621Sbt150084 
710*6621Sbt150084 /*
711*6621Sbt150084  * ixgbe_regs_map - Map the device registers.
712*6621Sbt150084  *
713*6621Sbt150084  */
714*6621Sbt150084 static int
715*6621Sbt150084 ixgbe_regs_map(ixgbe_t *ixgbe)
716*6621Sbt150084 {
717*6621Sbt150084 	dev_info_t *devinfo = ixgbe->dip;
718*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
719*6621Sbt150084 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
720*6621Sbt150084 	off_t mem_size;
721*6621Sbt150084 
722*6621Sbt150084 	/*
723*6621Sbt150084 	 * First get the size of device registers to be mapped.
724*6621Sbt150084 	 */
725*6621Sbt150084 	if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) {
726*6621Sbt150084 		return (IXGBE_FAILURE);
727*6621Sbt150084 	}
728*6621Sbt150084 
729*6621Sbt150084 	/*
730*6621Sbt150084 	 * Call ddi_regs_map_setup() to map registers
731*6621Sbt150084 	 */
732*6621Sbt150084 	if ((ddi_regs_map_setup(devinfo, 1,
733*6621Sbt150084 	    (caddr_t *)&hw->hw_addr, 0,
734*6621Sbt150084 	    mem_size, &ixgbe_regs_acc_attr,
735*6621Sbt150084 	    &osdep->reg_handle)) != DDI_SUCCESS) {
736*6621Sbt150084 		return (IXGBE_FAILURE);
737*6621Sbt150084 	}
738*6621Sbt150084 
739*6621Sbt150084 	return (IXGBE_SUCCESS);
740*6621Sbt150084 }
741*6621Sbt150084 
742*6621Sbt150084 /*
743*6621Sbt150084  * ixgbe_init_properties - Initialize driver properties.
744*6621Sbt150084  */
745*6621Sbt150084 static void
746*6621Sbt150084 ixgbe_init_properties(ixgbe_t *ixgbe)
747*6621Sbt150084 {
748*6621Sbt150084 	/*
749*6621Sbt150084 	 * Get conf file properties, including link settings
750*6621Sbt150084 	 * jumbo frames, ring number, descriptor number, etc.
751*6621Sbt150084 	 */
752*6621Sbt150084 	ixgbe_get_conf(ixgbe);
753*6621Sbt150084 }
754*6621Sbt150084 
755*6621Sbt150084 /*
756*6621Sbt150084  * ixgbe_init_driver_settings - Initialize driver settings.
757*6621Sbt150084  *
758*6621Sbt150084  * The settings include hardware function pointers, bus information,
759*6621Sbt150084  * rx/tx rings settings, link state, and any other parameters that
760*6621Sbt150084  * need to be setup during driver initialization.
761*6621Sbt150084  */
762*6621Sbt150084 static int
763*6621Sbt150084 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
764*6621Sbt150084 {
765*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
766*6621Sbt150084 	ixgbe_rx_ring_t *rx_ring;
767*6621Sbt150084 	ixgbe_tx_ring_t *tx_ring;
768*6621Sbt150084 	uint32_t rx_size;
769*6621Sbt150084 	uint32_t tx_size;
770*6621Sbt150084 	int i;
771*6621Sbt150084 
772*6621Sbt150084 	/*
773*6621Sbt150084 	 * Initialize chipset specific hardware function pointers
774*6621Sbt150084 	 */
775*6621Sbt150084 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
776*6621Sbt150084 		return (IXGBE_FAILURE);
777*6621Sbt150084 	}
778*6621Sbt150084 
779*6621Sbt150084 	/*
780*6621Sbt150084 	 * Set rx buffer size
781*6621Sbt150084 	 *
782*6621Sbt150084 	 * The IP header alignment room is counted in the calculation.
783*6621Sbt150084 	 * The rx buffer size is in unit of 1K that is required by the
784*6621Sbt150084 	 * chipset hardware.
785*6621Sbt150084 	 */
786*6621Sbt150084 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
787*6621Sbt150084 	ixgbe->rx_buf_size = ((rx_size >> 10) +
788*6621Sbt150084 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
789*6621Sbt150084 
790*6621Sbt150084 	/*
791*6621Sbt150084 	 * Set tx buffer size
792*6621Sbt150084 	 */
793*6621Sbt150084 	tx_size = ixgbe->max_frame_size;
794*6621Sbt150084 	ixgbe->tx_buf_size = ((tx_size >> 10) +
795*6621Sbt150084 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
796*6621Sbt150084 
797*6621Sbt150084 	/*
798*6621Sbt150084 	 * Initialize rx/tx rings parameters
799*6621Sbt150084 	 */
800*6621Sbt150084 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
801*6621Sbt150084 		rx_ring = &ixgbe->rx_rings[i];
802*6621Sbt150084 		rx_ring->index = i;
803*6621Sbt150084 		rx_ring->ixgbe = ixgbe;
804*6621Sbt150084 
805*6621Sbt150084 		rx_ring->ring_size = ixgbe->rx_ring_size;
806*6621Sbt150084 		rx_ring->free_list_size = ixgbe->rx_ring_size;
807*6621Sbt150084 		rx_ring->copy_thresh = ixgbe->rx_copy_thresh;
808*6621Sbt150084 		rx_ring->limit_per_intr = ixgbe->rx_limit_per_intr;
809*6621Sbt150084 	}
810*6621Sbt150084 
811*6621Sbt150084 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
812*6621Sbt150084 		tx_ring = &ixgbe->tx_rings[i];
813*6621Sbt150084 		tx_ring->index = i;
814*6621Sbt150084 		tx_ring->ixgbe = ixgbe;
815*6621Sbt150084 		if (ixgbe->tx_head_wb_enable)
816*6621Sbt150084 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
817*6621Sbt150084 		else
818*6621Sbt150084 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
819*6621Sbt150084 
820*6621Sbt150084 		tx_ring->ring_size = ixgbe->tx_ring_size;
821*6621Sbt150084 		tx_ring->free_list_size = ixgbe->tx_ring_size +
822*6621Sbt150084 		    (ixgbe->tx_ring_size >> 1);
823*6621Sbt150084 		tx_ring->copy_thresh = ixgbe->tx_copy_thresh;
824*6621Sbt150084 		tx_ring->recycle_thresh = ixgbe->tx_recycle_thresh;
825*6621Sbt150084 		tx_ring->overload_thresh = ixgbe->tx_overload_thresh;
826*6621Sbt150084 	tx_ring->resched_thresh = ixgbe->tx_resched_thresh;
827*6621Sbt150084 	}
828*6621Sbt150084 
829*6621Sbt150084 	/*
830*6621Sbt150084 	 * Initialize values of interrupt throttling rate
831*6621Sbt150084 	 */
832*6621Sbt150084 	for (i = 1; i < IXGBE_MAX_RING_VECTOR; i++)
833*6621Sbt150084 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
834*6621Sbt150084 
835*6621Sbt150084 	/*
836*6621Sbt150084 	 * The initial link state should be "unknown"
837*6621Sbt150084 	 */
838*6621Sbt150084 	ixgbe->link_state = LINK_STATE_UNKNOWN;
839*6621Sbt150084 	return (IXGBE_SUCCESS);
840*6621Sbt150084 }
841*6621Sbt150084 
842*6621Sbt150084 /*
843*6621Sbt150084  * ixgbe_init_locks - Initialize locks.
844*6621Sbt150084  */
845*6621Sbt150084 static void
846*6621Sbt150084 ixgbe_init_locks(ixgbe_t *ixgbe)
847*6621Sbt150084 {
848*6621Sbt150084 	ixgbe_rx_ring_t *rx_ring;
849*6621Sbt150084 	ixgbe_tx_ring_t *tx_ring;
850*6621Sbt150084 	int i;
851*6621Sbt150084 
852*6621Sbt150084 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
853*6621Sbt150084 		rx_ring = &ixgbe->rx_rings[i];
854*6621Sbt150084 		mutex_init(&rx_ring->rx_lock, NULL,
855*6621Sbt150084 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
856*6621Sbt150084 		mutex_init(&rx_ring->recycle_lock, NULL,
857*6621Sbt150084 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
858*6621Sbt150084 	}
859*6621Sbt150084 
860*6621Sbt150084 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
861*6621Sbt150084 		tx_ring = &ixgbe->tx_rings[i];
862*6621Sbt150084 		mutex_init(&tx_ring->tx_lock, NULL,
863*6621Sbt150084 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
864*6621Sbt150084 		mutex_init(&tx_ring->recycle_lock, NULL,
865*6621Sbt150084 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
866*6621Sbt150084 		mutex_init(&tx_ring->tcb_head_lock, NULL,
867*6621Sbt150084 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
868*6621Sbt150084 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
869*6621Sbt150084 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
870*6621Sbt150084 	}
871*6621Sbt150084 
872*6621Sbt150084 	mutex_init(&ixgbe->gen_lock, NULL,
873*6621Sbt150084 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
874*6621Sbt150084 
875*6621Sbt150084 	mutex_init(&ixgbe->watchdog_lock, NULL,
876*6621Sbt150084 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
877*6621Sbt150084 }
878*6621Sbt150084 
879*6621Sbt150084 /*
880*6621Sbt150084  * ixgbe_destroy_locks - Destroy locks.
881*6621Sbt150084  */
882*6621Sbt150084 static void
883*6621Sbt150084 ixgbe_destroy_locks(ixgbe_t *ixgbe)
884*6621Sbt150084 {
885*6621Sbt150084 	ixgbe_rx_ring_t *rx_ring;
886*6621Sbt150084 	ixgbe_tx_ring_t *tx_ring;
887*6621Sbt150084 	int i;
888*6621Sbt150084 
889*6621Sbt150084 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
890*6621Sbt150084 		rx_ring = &ixgbe->rx_rings[i];
891*6621Sbt150084 		mutex_destroy(&rx_ring->rx_lock);
892*6621Sbt150084 		mutex_destroy(&rx_ring->recycle_lock);
893*6621Sbt150084 	}
894*6621Sbt150084 
895*6621Sbt150084 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
896*6621Sbt150084 		tx_ring = &ixgbe->tx_rings[i];
897*6621Sbt150084 		mutex_destroy(&tx_ring->tx_lock);
898*6621Sbt150084 		mutex_destroy(&tx_ring->recycle_lock);
899*6621Sbt150084 		mutex_destroy(&tx_ring->tcb_head_lock);
900*6621Sbt150084 		mutex_destroy(&tx_ring->tcb_tail_lock);
901*6621Sbt150084 	}
902*6621Sbt150084 
903*6621Sbt150084 	mutex_destroy(&ixgbe->gen_lock);
904*6621Sbt150084 	mutex_destroy(&ixgbe->watchdog_lock);
905*6621Sbt150084 }
906*6621Sbt150084 
907*6621Sbt150084 static int
908*6621Sbt150084 ixgbe_resume(dev_info_t *devinfo)
909*6621Sbt150084 {
910*6621Sbt150084 	ixgbe_t *ixgbe;
911*6621Sbt150084 
912*6621Sbt150084 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
913*6621Sbt150084 	if (ixgbe == NULL)
914*6621Sbt150084 		return (DDI_FAILURE);
915*6621Sbt150084 
916*6621Sbt150084 	mutex_enter(&ixgbe->gen_lock);
917*6621Sbt150084 
918*6621Sbt150084 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
919*6621Sbt150084 		if (ixgbe_start(ixgbe) != IXGBE_SUCCESS) {
920*6621Sbt150084 			mutex_exit(&ixgbe->gen_lock);
921*6621Sbt150084 			return (DDI_FAILURE);
922*6621Sbt150084 		}
923*6621Sbt150084 
924*6621Sbt150084 		/*
925*6621Sbt150084 		 * Enable and start the watchdog timer
926*6621Sbt150084 		 */
927*6621Sbt150084 		ixgbe_enable_watchdog_timer(ixgbe);
928*6621Sbt150084 	}
929*6621Sbt150084 
930*6621Sbt150084 	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
931*6621Sbt150084 
932*6621Sbt150084 	mutex_exit(&ixgbe->gen_lock);
933*6621Sbt150084 
934*6621Sbt150084 	return (DDI_SUCCESS);
935*6621Sbt150084 }
936*6621Sbt150084 
937*6621Sbt150084 static int
938*6621Sbt150084 ixgbe_suspend(dev_info_t *devinfo)
939*6621Sbt150084 {
940*6621Sbt150084 	ixgbe_t *ixgbe;
941*6621Sbt150084 
942*6621Sbt150084 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
943*6621Sbt150084 	if (ixgbe == NULL)
944*6621Sbt150084 		return (DDI_FAILURE);
945*6621Sbt150084 
946*6621Sbt150084 	mutex_enter(&ixgbe->gen_lock);
947*6621Sbt150084 
948*6621Sbt150084 	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
949*6621Sbt150084 
950*6621Sbt150084 	ixgbe_stop(ixgbe);
951*6621Sbt150084 
952*6621Sbt150084 	mutex_exit(&ixgbe->gen_lock);
953*6621Sbt150084 
954*6621Sbt150084 	/*
955*6621Sbt150084 	 * Disable and stop the watchdog timer
956*6621Sbt150084 	 */
957*6621Sbt150084 	ixgbe_disable_watchdog_timer(ixgbe);
958*6621Sbt150084 
959*6621Sbt150084 	return (DDI_SUCCESS);
960*6621Sbt150084 }
961*6621Sbt150084 
962*6621Sbt150084 /*
963*6621Sbt150084  * ixgbe_init - Initialize the device.
964*6621Sbt150084  */
965*6621Sbt150084 static int
966*6621Sbt150084 ixgbe_init(ixgbe_t *ixgbe)
967*6621Sbt150084 {
968*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
969*6621Sbt150084 
970*6621Sbt150084 	mutex_enter(&ixgbe->gen_lock);
971*6621Sbt150084 
972*6621Sbt150084 	/*
973*6621Sbt150084 	 * Reset chipset to put the hardware in a known state
974*6621Sbt150084 	 * before we try to do anything with the eeprom.
975*6621Sbt150084 	 */
976*6621Sbt150084 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
977*6621Sbt150084 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
978*6621Sbt150084 		goto init_fail;
979*6621Sbt150084 	}
980*6621Sbt150084 
981*6621Sbt150084 	/*
982*6621Sbt150084 	 * Need to init eeprom before validating the checksum.
983*6621Sbt150084 	 */
984*6621Sbt150084 	if (ixgbe_init_eeprom_params(hw) < 0) {
985*6621Sbt150084 		ixgbe_error(ixgbe,
986*6621Sbt150084 		    "Unable to intitialize the eeprom interface.");
987*6621Sbt150084 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
988*6621Sbt150084 		goto init_fail;
989*6621Sbt150084 	}
990*6621Sbt150084 
991*6621Sbt150084 	/*
992*6621Sbt150084 	 * NVM validation
993*6621Sbt150084 	 */
994*6621Sbt150084 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
995*6621Sbt150084 		/*
996*6621Sbt150084 		 * Some PCI-E parts fail the first check due to
997*6621Sbt150084 		 * the link being in sleep state.  Call it again,
998*6621Sbt150084 		 * if it fails a second time it's a real issue.
999*6621Sbt150084 		 */
1000*6621Sbt150084 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1001*6621Sbt150084 			ixgbe_error(ixgbe,
1002*6621Sbt150084 			    "Invalid NVM checksum. Please contact "
1003*6621Sbt150084 			    "the vendor to update the NVM.");
1004*6621Sbt150084 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1005*6621Sbt150084 			goto init_fail;
1006*6621Sbt150084 		}
1007*6621Sbt150084 	}
1008*6621Sbt150084 
1009*6621Sbt150084 	/*
1010*6621Sbt150084 	 * Setup default flow control thresholds - enable/disable
1011*6621Sbt150084 	 * & flow control type is controlled by ixgbe.conf
1012*6621Sbt150084 	 */
1013*6621Sbt150084 	hw->fc.high_water = DEFAULT_FCRTH;
1014*6621Sbt150084 	hw->fc.low_water = DEFAULT_FCRTL;
1015*6621Sbt150084 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1016*6621Sbt150084 	hw->fc.send_xon = B_TRUE;
1017*6621Sbt150084 
1018*6621Sbt150084 	/*
1019*6621Sbt150084 	 * Don't wait for auto-negotiation to complete
1020*6621Sbt150084 	 */
1021*6621Sbt150084 	hw->phy.autoneg_wait_to_complete = B_FALSE;
1022*6621Sbt150084 
1023*6621Sbt150084 	/*
1024*6621Sbt150084 	 * Initialize link settings
1025*6621Sbt150084 	 */
1026*6621Sbt150084 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1027*6621Sbt150084 
1028*6621Sbt150084 	/*
1029*6621Sbt150084 	 * Initialize the chipset hardware
1030*6621Sbt150084 	 */
1031*6621Sbt150084 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1032*6621Sbt150084 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1033*6621Sbt150084 		goto init_fail;
1034*6621Sbt150084 	}
1035*6621Sbt150084 
1036*6621Sbt150084 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
1037*6621Sbt150084 		goto init_fail;
1038*6621Sbt150084 	}
1039*6621Sbt150084 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1040*6621Sbt150084 		goto init_fail;
1041*6621Sbt150084 	}
1042*6621Sbt150084 
1043*6621Sbt150084 	mutex_exit(&ixgbe->gen_lock);
1044*6621Sbt150084 	return (IXGBE_SUCCESS);
1045*6621Sbt150084 
1046*6621Sbt150084 init_fail:
1047*6621Sbt150084 	/*
1048*6621Sbt150084 	 * Reset PHY
1049*6621Sbt150084 	 */
1050*6621Sbt150084 	(void) ixgbe_reset_phy(hw);
1051*6621Sbt150084 
1052*6621Sbt150084 	mutex_exit(&ixgbe->gen_lock);
1053*6621Sbt150084 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1054*6621Sbt150084 	return (IXGBE_FAILURE);
1055*6621Sbt150084 }
1056*6621Sbt150084 
1057*6621Sbt150084 /*
1058*6621Sbt150084  * ixgbe_init_rings - Allocate DMA resources for all rx/tx rings and
1059*6621Sbt150084  * initialize relevant hardware settings.
1060*6621Sbt150084  */
1061*6621Sbt150084 static int
1062*6621Sbt150084 ixgbe_init_rings(ixgbe_t *ixgbe)
1063*6621Sbt150084 {
1064*6621Sbt150084 	int i;
1065*6621Sbt150084 
1066*6621Sbt150084 	/*
1067*6621Sbt150084 	 * Allocate buffers for all the rx/tx rings
1068*6621Sbt150084 	 */
1069*6621Sbt150084 	if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS)
1070*6621Sbt150084 		return (IXGBE_FAILURE);
1071*6621Sbt150084 
1072*6621Sbt150084 	/*
1073*6621Sbt150084 	 * Setup the rx/tx rings
1074*6621Sbt150084 	 */
1075*6621Sbt150084 	mutex_enter(&ixgbe->gen_lock);
1076*6621Sbt150084 
1077*6621Sbt150084 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1078*6621Sbt150084 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1079*6621Sbt150084 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1080*6621Sbt150084 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1081*6621Sbt150084 
1082*6621Sbt150084 	ixgbe_setup_rings(ixgbe);
1083*6621Sbt150084 
1084*6621Sbt150084 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1085*6621Sbt150084 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1086*6621Sbt150084 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1087*6621Sbt150084 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1088*6621Sbt150084 
1089*6621Sbt150084 	mutex_exit(&ixgbe->gen_lock);
1090*6621Sbt150084 
1091*6621Sbt150084 	return (IXGBE_SUCCESS);
1092*6621Sbt150084 }
1093*6621Sbt150084 
1094*6621Sbt150084 /*
1095*6621Sbt150084  * ixgbe_fini_rings - Release DMA resources of all rx/tx rings.
1096*6621Sbt150084  */
1097*6621Sbt150084 static void
1098*6621Sbt150084 ixgbe_fini_rings(ixgbe_t *ixgbe)
1099*6621Sbt150084 {
1100*6621Sbt150084 	/*
1101*6621Sbt150084 	 * Release the DMA/memory resources of rx/tx rings
1102*6621Sbt150084 	 */
1103*6621Sbt150084 	ixgbe_free_dma(ixgbe);
1104*6621Sbt150084 }
1105*6621Sbt150084 
1106*6621Sbt150084 /*
1107*6621Sbt150084  * ixgbe_chip_start - Initialize and start the chipset hardware.
1108*6621Sbt150084  */
1109*6621Sbt150084 static int
1110*6621Sbt150084 ixgbe_chip_start(ixgbe_t *ixgbe)
1111*6621Sbt150084 {
1112*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
1113*6621Sbt150084 	int i;
1114*6621Sbt150084 
1115*6621Sbt150084 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1116*6621Sbt150084 
1117*6621Sbt150084 	/*
1118*6621Sbt150084 	 * Get the mac address
1119*6621Sbt150084 	 * This function should handle SPARC case correctly.
1120*6621Sbt150084 	 */
1121*6621Sbt150084 	if (!ixgbe_find_mac_address(ixgbe)) {
1122*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to get the mac address");
1123*6621Sbt150084 		return (IXGBE_FAILURE);
1124*6621Sbt150084 	}
1125*6621Sbt150084 
1126*6621Sbt150084 	/*
1127*6621Sbt150084 	 * Validate the mac address
1128*6621Sbt150084 	 */
1129*6621Sbt150084 	(void) ixgbe_init_rx_addrs(hw);
1130*6621Sbt150084 	if (!is_valid_mac_addr(hw->mac.addr)) {
1131*6621Sbt150084 		ixgbe_error(ixgbe, "Invalid mac address");
1132*6621Sbt150084 		return (IXGBE_FAILURE);
1133*6621Sbt150084 	}
1134*6621Sbt150084 
1135*6621Sbt150084 	/*
1136*6621Sbt150084 	 * Configure/Initialize hardware
1137*6621Sbt150084 	 */
1138*6621Sbt150084 	if (ixgbe_init_hw(hw) != IXGBE_SUCCESS) {
1139*6621Sbt150084 		ixgbe_error(ixgbe, "Failed to initialize hardware");
1140*6621Sbt150084 		return (IXGBE_FAILURE);
1141*6621Sbt150084 	}
1142*6621Sbt150084 
1143*6621Sbt150084 	/*
1144*6621Sbt150084 	 * Setup adapter interrupt vectors
1145*6621Sbt150084 	 */
1146*6621Sbt150084 	ixgbe_setup_adapter_vector(ixgbe);
1147*6621Sbt150084 
1148*6621Sbt150084 	/*
1149*6621Sbt150084 	 * Initialize unicast addresses.
1150*6621Sbt150084 	 */
1151*6621Sbt150084 	ixgbe_init_unicst(ixgbe);
1152*6621Sbt150084 
1153*6621Sbt150084 	/*
1154*6621Sbt150084 	 * Setup and initialize the mctable structures.
1155*6621Sbt150084 	 */
1156*6621Sbt150084 	ixgbe_setup_multicst(ixgbe);
1157*6621Sbt150084 
1158*6621Sbt150084 	/*
1159*6621Sbt150084 	 * Set interrupt throttling rate
1160*6621Sbt150084 	 */
1161*6621Sbt150084 	for (i = 0; i < ixgbe->intr_cnt; i++)
1162*6621Sbt150084 		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1163*6621Sbt150084 
1164*6621Sbt150084 	/*
1165*6621Sbt150084 	 * Save the state of the phy
1166*6621Sbt150084 	 */
1167*6621Sbt150084 	ixgbe_get_hw_state(ixgbe);
1168*6621Sbt150084 
1169*6621Sbt150084 	/*
1170*6621Sbt150084 	 * Make sure driver has control
1171*6621Sbt150084 	 */
1172*6621Sbt150084 	ixgbe_get_driver_control(hw);
1173*6621Sbt150084 
1174*6621Sbt150084 	return (IXGBE_SUCCESS);
1175*6621Sbt150084 }
1176*6621Sbt150084 
1177*6621Sbt150084 /*
1178*6621Sbt150084  * ixgbe_chip_stop - Stop the chipset hardware
1179*6621Sbt150084  */
1180*6621Sbt150084 static void
1181*6621Sbt150084 ixgbe_chip_stop(ixgbe_t *ixgbe)
1182*6621Sbt150084 {
1183*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
1184*6621Sbt150084 
1185*6621Sbt150084 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1186*6621Sbt150084 
1187*6621Sbt150084 	/*
1188*6621Sbt150084 	 * Tell firmware driver is no longer in control
1189*6621Sbt150084 	 */
1190*6621Sbt150084 	ixgbe_release_driver_control(hw);
1191*6621Sbt150084 
1192*6621Sbt150084 	/*
1193*6621Sbt150084 	 * Reset the chipset
1194*6621Sbt150084 	 */
1195*6621Sbt150084 	(void) ixgbe_reset_hw(hw);
1196*6621Sbt150084 
1197*6621Sbt150084 	/*
1198*6621Sbt150084 	 * Reset PHY
1199*6621Sbt150084 	 */
1200*6621Sbt150084 	(void) ixgbe_reset_phy(hw);
1201*6621Sbt150084 }
1202*6621Sbt150084 
1203*6621Sbt150084 /*
1204*6621Sbt150084  * ixgbe_reset - Reset the chipset and re-start the driver.
1205*6621Sbt150084  *
1206*6621Sbt150084  * It involves stopping and re-starting the chipset,
1207*6621Sbt150084  * and re-configuring the rx/tx rings.
1208*6621Sbt150084  */
1209*6621Sbt150084 static int
1210*6621Sbt150084 ixgbe_reset(ixgbe_t *ixgbe)
1211*6621Sbt150084 {
1212*6621Sbt150084 	int i;
1213*6621Sbt150084 
1214*6621Sbt150084 	mutex_enter(&ixgbe->gen_lock);
1215*6621Sbt150084 
1216*6621Sbt150084 	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1217*6621Sbt150084 	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1218*6621Sbt150084 
1219*6621Sbt150084 	/*
1220*6621Sbt150084 	 * Disable the adapter interrupts to stop any rx/tx activities
1221*6621Sbt150084 	 * before draining pending data and resetting hardware.
1222*6621Sbt150084 	 */
1223*6621Sbt150084 	ixgbe_disable_adapter_interrupts(ixgbe);
1224*6621Sbt150084 
1225*6621Sbt150084 	/*
1226*6621Sbt150084 	 * Drain the pending transmit packets
1227*6621Sbt150084 	 */
1228*6621Sbt150084 	(void) ixgbe_tx_drain(ixgbe);
1229*6621Sbt150084 
1230*6621Sbt150084 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1231*6621Sbt150084 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1232*6621Sbt150084 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1233*6621Sbt150084 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1234*6621Sbt150084 
1235*6621Sbt150084 	/*
1236*6621Sbt150084 	 * Stop the chipset hardware
1237*6621Sbt150084 	 */
1238*6621Sbt150084 	ixgbe_chip_stop(ixgbe);
1239*6621Sbt150084 
1240*6621Sbt150084 	/*
1241*6621Sbt150084 	 * Clean the pending tx data/resources
1242*6621Sbt150084 	 */
1243*6621Sbt150084 	ixgbe_tx_clean(ixgbe);
1244*6621Sbt150084 
1245*6621Sbt150084 	/*
1246*6621Sbt150084 	 * Start the chipset hardware
1247*6621Sbt150084 	 */
1248*6621Sbt150084 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1249*6621Sbt150084 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1250*6621Sbt150084 		goto reset_failure;
1251*6621Sbt150084 	}
1252*6621Sbt150084 
1253*6621Sbt150084 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1254*6621Sbt150084 		goto reset_failure;
1255*6621Sbt150084 	}
1256*6621Sbt150084 
1257*6621Sbt150084 	/*
1258*6621Sbt150084 	 * Setup the rx/tx rings
1259*6621Sbt150084 	 */
1260*6621Sbt150084 	ixgbe_setup_rings(ixgbe);
1261*6621Sbt150084 
1262*6621Sbt150084 	/*
1263*6621Sbt150084 	 * Enable adapter interrupts
1264*6621Sbt150084 	 * The interrupts must be enabled after the driver state is START
1265*6621Sbt150084 	 */
1266*6621Sbt150084 	ixgbe_enable_adapter_interrupts(ixgbe);
1267*6621Sbt150084 
1268*6621Sbt150084 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1269*6621Sbt150084 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1270*6621Sbt150084 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1271*6621Sbt150084 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1272*6621Sbt150084 
1273*6621Sbt150084 	ixgbe->ixgbe_state |= IXGBE_STARTED;
1274*6621Sbt150084 	mutex_exit(&ixgbe->gen_lock);
1275*6621Sbt150084 
1276*6621Sbt150084 	return (IXGBE_SUCCESS);
1277*6621Sbt150084 
1278*6621Sbt150084 reset_failure:
1279*6621Sbt150084 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1280*6621Sbt150084 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1281*6621Sbt150084 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1282*6621Sbt150084 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1283*6621Sbt150084 
1284*6621Sbt150084 	mutex_exit(&ixgbe->gen_lock);
1285*6621Sbt150084 
1286*6621Sbt150084 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1287*6621Sbt150084 
1288*6621Sbt150084 	return (IXGBE_FAILURE);
1289*6621Sbt150084 }
1290*6621Sbt150084 
1291*6621Sbt150084 /*
1292*6621Sbt150084  * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1293*6621Sbt150084  */
1294*6621Sbt150084 static void
1295*6621Sbt150084 ixgbe_tx_clean(ixgbe_t *ixgbe)
1296*6621Sbt150084 {
1297*6621Sbt150084 	ixgbe_tx_ring_t *tx_ring;
1298*6621Sbt150084 	tx_control_block_t *tcb;
1299*6621Sbt150084 	link_list_t pending_list;
1300*6621Sbt150084 	uint32_t desc_num;
1301*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
1302*6621Sbt150084 	int i, j;
1303*6621Sbt150084 
1304*6621Sbt150084 	LINK_LIST_INIT(&pending_list);
1305*6621Sbt150084 
1306*6621Sbt150084 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1307*6621Sbt150084 		tx_ring = &ixgbe->tx_rings[i];
1308*6621Sbt150084 
1309*6621Sbt150084 		mutex_enter(&tx_ring->recycle_lock);
1310*6621Sbt150084 
1311*6621Sbt150084 		/*
1312*6621Sbt150084 		 * Clean the pending tx data - the pending packets in the
1313*6621Sbt150084 		 * work_list that have no chances to be transmitted again.
1314*6621Sbt150084 		 *
1315*6621Sbt150084 		 * We must ensure the chipset is stopped or the link is down
1316*6621Sbt150084 		 * before cleaning the transmit packets.
1317*6621Sbt150084 		 */
1318*6621Sbt150084 		desc_num = 0;
1319*6621Sbt150084 		for (j = 0; j < tx_ring->ring_size; j++) {
1320*6621Sbt150084 			tcb = tx_ring->work_list[j];
1321*6621Sbt150084 			if (tcb != NULL) {
1322*6621Sbt150084 				desc_num += tcb->desc_num;
1323*6621Sbt150084 
1324*6621Sbt150084 				tx_ring->work_list[j] = NULL;
1325*6621Sbt150084 
1326*6621Sbt150084 				ixgbe_free_tcb(tcb);
1327*6621Sbt150084 
1328*6621Sbt150084 				LIST_PUSH_TAIL(&pending_list, &tcb->link);
1329*6621Sbt150084 			}
1330*6621Sbt150084 		}
1331*6621Sbt150084 
1332*6621Sbt150084 		if (desc_num > 0) {
1333*6621Sbt150084 			atomic_add_32(&tx_ring->tbd_free, desc_num);
1334*6621Sbt150084 			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1335*6621Sbt150084 
1336*6621Sbt150084 			/*
1337*6621Sbt150084 			 * Reset the head and tail pointers of the tbd ring;
1338*6621Sbt150084 			 * Reset the writeback head if it's enable.
1339*6621Sbt150084 			 */
1340*6621Sbt150084 			tx_ring->tbd_head = 0;
1341*6621Sbt150084 			tx_ring->tbd_tail = 0;
1342*6621Sbt150084 			if (ixgbe->tx_head_wb_enable)
1343*6621Sbt150084 				*tx_ring->tbd_head_wb = 0;
1344*6621Sbt150084 
1345*6621Sbt150084 			IXGBE_WRITE_REG(&ixgbe->hw,
1346*6621Sbt150084 			    IXGBE_TDH(tx_ring->index), 0);
1347*6621Sbt150084 			IXGBE_WRITE_REG(&ixgbe->hw,
1348*6621Sbt150084 			    IXGBE_TDT(tx_ring->index), 0);
1349*6621Sbt150084 		}
1350*6621Sbt150084 
1351*6621Sbt150084 		mutex_exit(&tx_ring->recycle_lock);
1352*6621Sbt150084 
1353*6621Sbt150084 		/*
1354*6621Sbt150084 		 * Add the tx control blocks in the pending list to
1355*6621Sbt150084 		 * the free list.
1356*6621Sbt150084 		 */
1357*6621Sbt150084 		ixgbe_put_free_list(tx_ring, &pending_list);
1358*6621Sbt150084 	}
1359*6621Sbt150084 }
1360*6621Sbt150084 
1361*6621Sbt150084 /*
1362*6621Sbt150084  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1363*6621Sbt150084  * transmitted.
1364*6621Sbt150084  */
1365*6621Sbt150084 static boolean_t
1366*6621Sbt150084 ixgbe_tx_drain(ixgbe_t *ixgbe)
1367*6621Sbt150084 {
1368*6621Sbt150084 	ixgbe_tx_ring_t *tx_ring;
1369*6621Sbt150084 	boolean_t done;
1370*6621Sbt150084 	int i, j;
1371*6621Sbt150084 
1372*6621Sbt150084 	/*
1373*6621Sbt150084 	 * Wait for a specific time to allow pending tx packets
1374*6621Sbt150084 	 * to be transmitted.
1375*6621Sbt150084 	 *
1376*6621Sbt150084 	 * Check the counter tbd_free to see if transmission is done.
1377*6621Sbt150084 	 * No lock protection is needed here.
1378*6621Sbt150084 	 *
1379*6621Sbt150084 	 * Return B_TRUE if all pending packets have been transmitted;
1380*6621Sbt150084 	 * Otherwise return B_FALSE;
1381*6621Sbt150084 	 */
1382*6621Sbt150084 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1383*6621Sbt150084 
1384*6621Sbt150084 		done = B_TRUE;
1385*6621Sbt150084 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1386*6621Sbt150084 			tx_ring = &ixgbe->tx_rings[j];
1387*6621Sbt150084 			done = done &&
1388*6621Sbt150084 			    (tx_ring->tbd_free == tx_ring->ring_size);
1389*6621Sbt150084 		}
1390*6621Sbt150084 
1391*6621Sbt150084 		if (done)
1392*6621Sbt150084 			break;
1393*6621Sbt150084 
1394*6621Sbt150084 		msec_delay(1);
1395*6621Sbt150084 	}
1396*6621Sbt150084 
1397*6621Sbt150084 	return (done);
1398*6621Sbt150084 }
1399*6621Sbt150084 
1400*6621Sbt150084 /*
1401*6621Sbt150084  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1402*6621Sbt150084  */
1403*6621Sbt150084 static boolean_t
1404*6621Sbt150084 ixgbe_rx_drain(ixgbe_t *ixgbe)
1405*6621Sbt150084 {
1406*6621Sbt150084 	ixgbe_rx_ring_t *rx_ring;
1407*6621Sbt150084 	boolean_t done;
1408*6621Sbt150084 	int i, j;
1409*6621Sbt150084 
1410*6621Sbt150084 	/*
1411*6621Sbt150084 	 * Polling the rx free list to check if those rx buffers held by
1412*6621Sbt150084 	 * the upper layer are released.
1413*6621Sbt150084 	 *
1414*6621Sbt150084 	 * Check the counter rcb_free to see if all pending buffers are
1415*6621Sbt150084 	 * released. No lock protection is needed here.
1416*6621Sbt150084 	 *
1417*6621Sbt150084 	 * Return B_TRUE if all pending buffers have been released;
1418*6621Sbt150084 	 * Otherwise return B_FALSE;
1419*6621Sbt150084 	 */
1420*6621Sbt150084 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1421*6621Sbt150084 
1422*6621Sbt150084 		done = B_TRUE;
1423*6621Sbt150084 		for (j = 0; j < ixgbe->num_rx_rings; j++) {
1424*6621Sbt150084 			rx_ring = &ixgbe->rx_rings[j];
1425*6621Sbt150084 			done = done &&
1426*6621Sbt150084 			    (rx_ring->rcb_free == rx_ring->free_list_size);
1427*6621Sbt150084 		}
1428*6621Sbt150084 
1429*6621Sbt150084 		if (done)
1430*6621Sbt150084 			break;
1431*6621Sbt150084 
1432*6621Sbt150084 		msec_delay(1);
1433*6621Sbt150084 	}
1434*6621Sbt150084 
1435*6621Sbt150084 	return (done);
1436*6621Sbt150084 }
1437*6621Sbt150084 
1438*6621Sbt150084 /*
1439*6621Sbt150084  * ixgbe_start - Start the driver/chipset.
1440*6621Sbt150084  */
1441*6621Sbt150084 int
1442*6621Sbt150084 ixgbe_start(ixgbe_t *ixgbe)
1443*6621Sbt150084 {
1444*6621Sbt150084 	int i;
1445*6621Sbt150084 
1446*6621Sbt150084 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1447*6621Sbt150084 
1448*6621Sbt150084 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1449*6621Sbt150084 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1450*6621Sbt150084 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1451*6621Sbt150084 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1452*6621Sbt150084 
1453*6621Sbt150084 	/*
1454*6621Sbt150084 	 * Start the chipset hardware
1455*6621Sbt150084 	 */
1456*6621Sbt150084 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1457*6621Sbt150084 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1458*6621Sbt150084 		goto start_failure;
1459*6621Sbt150084 	}
1460*6621Sbt150084 
1461*6621Sbt150084 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1462*6621Sbt150084 		goto start_failure;
1463*6621Sbt150084 	}
1464*6621Sbt150084 
1465*6621Sbt150084 	/*
1466*6621Sbt150084 	 * Setup the rx/tx rings
1467*6621Sbt150084 	 */
1468*6621Sbt150084 	ixgbe_setup_rings(ixgbe);
1469*6621Sbt150084 
1470*6621Sbt150084 	/*
1471*6621Sbt150084 	 * Enable adapter interrupts
1472*6621Sbt150084 	 * The interrupts must be enabled after the driver state is START
1473*6621Sbt150084 	 */
1474*6621Sbt150084 	ixgbe_enable_adapter_interrupts(ixgbe);
1475*6621Sbt150084 
1476*6621Sbt150084 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1477*6621Sbt150084 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1478*6621Sbt150084 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1479*6621Sbt150084 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1480*6621Sbt150084 
1481*6621Sbt150084 	return (IXGBE_SUCCESS);
1482*6621Sbt150084 
1483*6621Sbt150084 start_failure:
1484*6621Sbt150084 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1485*6621Sbt150084 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1486*6621Sbt150084 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1487*6621Sbt150084 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1488*6621Sbt150084 
1489*6621Sbt150084 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1490*6621Sbt150084 
1491*6621Sbt150084 	return (IXGBE_FAILURE);
1492*6621Sbt150084 }
1493*6621Sbt150084 
1494*6621Sbt150084 /*
1495*6621Sbt150084  * ixgbe_stop - Stop the driver/chipset.
1496*6621Sbt150084  */
1497*6621Sbt150084 void
1498*6621Sbt150084 ixgbe_stop(ixgbe_t *ixgbe)
1499*6621Sbt150084 {
1500*6621Sbt150084 	int i;
1501*6621Sbt150084 
1502*6621Sbt150084 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1503*6621Sbt150084 
1504*6621Sbt150084 	/*
1505*6621Sbt150084 	 * Disable the adapter interrupts
1506*6621Sbt150084 	 */
1507*6621Sbt150084 	ixgbe_disable_adapter_interrupts(ixgbe);
1508*6621Sbt150084 
1509*6621Sbt150084 	/*
1510*6621Sbt150084 	 * Drain the pending tx packets
1511*6621Sbt150084 	 */
1512*6621Sbt150084 	(void) ixgbe_tx_drain(ixgbe);
1513*6621Sbt150084 
1514*6621Sbt150084 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1515*6621Sbt150084 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1516*6621Sbt150084 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1517*6621Sbt150084 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1518*6621Sbt150084 
1519*6621Sbt150084 	/*
1520*6621Sbt150084 	 * Stop the chipset hardware
1521*6621Sbt150084 	 */
1522*6621Sbt150084 	ixgbe_chip_stop(ixgbe);
1523*6621Sbt150084 
1524*6621Sbt150084 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1525*6621Sbt150084 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1526*6621Sbt150084 	}
1527*6621Sbt150084 
1528*6621Sbt150084 	/*
1529*6621Sbt150084 	 * Clean the pending tx data/resources
1530*6621Sbt150084 	 */
1531*6621Sbt150084 	ixgbe_tx_clean(ixgbe);
1532*6621Sbt150084 
1533*6621Sbt150084 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1534*6621Sbt150084 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1535*6621Sbt150084 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1536*6621Sbt150084 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1537*6621Sbt150084 }
1538*6621Sbt150084 
1539*6621Sbt150084 /*
1540*6621Sbt150084  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1541*6621Sbt150084  */
1542*6621Sbt150084 static int
1543*6621Sbt150084 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1544*6621Sbt150084 {
1545*6621Sbt150084 	/*
1546*6621Sbt150084 	 * Allocate memory space for rx rings
1547*6621Sbt150084 	 */
1548*6621Sbt150084 	ixgbe->rx_rings = kmem_zalloc(
1549*6621Sbt150084 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1550*6621Sbt150084 	    KM_NOSLEEP);
1551*6621Sbt150084 
1552*6621Sbt150084 	if (ixgbe->rx_rings == NULL) {
1553*6621Sbt150084 		return (IXGBE_FAILURE);
1554*6621Sbt150084 	}
1555*6621Sbt150084 
1556*6621Sbt150084 	/*
1557*6621Sbt150084 	 * Allocate memory space for tx rings
1558*6621Sbt150084 	 */
1559*6621Sbt150084 	ixgbe->tx_rings = kmem_zalloc(
1560*6621Sbt150084 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1561*6621Sbt150084 	    KM_NOSLEEP);
1562*6621Sbt150084 
1563*6621Sbt150084 	if (ixgbe->tx_rings == NULL) {
1564*6621Sbt150084 		kmem_free(ixgbe->rx_rings,
1565*6621Sbt150084 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1566*6621Sbt150084 		ixgbe->rx_rings = NULL;
1567*6621Sbt150084 		return (IXGBE_FAILURE);
1568*6621Sbt150084 	}
1569*6621Sbt150084 
1570*6621Sbt150084 	return (IXGBE_SUCCESS);
1571*6621Sbt150084 }
1572*6621Sbt150084 
1573*6621Sbt150084 /*
1574*6621Sbt150084  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1575*6621Sbt150084  */
1576*6621Sbt150084 static void
1577*6621Sbt150084 ixgbe_free_rings(ixgbe_t *ixgbe)
1578*6621Sbt150084 {
1579*6621Sbt150084 	if (ixgbe->rx_rings != NULL) {
1580*6621Sbt150084 		kmem_free(ixgbe->rx_rings,
1581*6621Sbt150084 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1582*6621Sbt150084 		ixgbe->rx_rings = NULL;
1583*6621Sbt150084 	}
1584*6621Sbt150084 
1585*6621Sbt150084 	if (ixgbe->tx_rings != NULL) {
1586*6621Sbt150084 		kmem_free(ixgbe->tx_rings,
1587*6621Sbt150084 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1588*6621Sbt150084 		ixgbe->tx_rings = NULL;
1589*6621Sbt150084 	}
1590*6621Sbt150084 }
1591*6621Sbt150084 
1592*6621Sbt150084 /*
1593*6621Sbt150084  * ixgbe_setup_rings - Setup rx/tx rings.
1594*6621Sbt150084  */
1595*6621Sbt150084 static void
1596*6621Sbt150084 ixgbe_setup_rings(ixgbe_t *ixgbe)
1597*6621Sbt150084 {
1598*6621Sbt150084 	/*
1599*6621Sbt150084 	 * Setup the rx/tx rings, including the following:
1600*6621Sbt150084 	 *
1601*6621Sbt150084 	 * 1. Setup the descriptor ring and the control block buffers;
1602*6621Sbt150084 	 * 2. Initialize necessary registers for receive/transmit;
1603*6621Sbt150084 	 * 3. Initialize software pointers/parameters for receive/transmit;
1604*6621Sbt150084 	 */
1605*6621Sbt150084 	ixgbe_setup_rx(ixgbe);
1606*6621Sbt150084 
1607*6621Sbt150084 	ixgbe_setup_tx(ixgbe);
1608*6621Sbt150084 }
1609*6621Sbt150084 
1610*6621Sbt150084 static void
1611*6621Sbt150084 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
1612*6621Sbt150084 {
1613*6621Sbt150084 	ixgbe_t *ixgbe = rx_ring->ixgbe;
1614*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
1615*6621Sbt150084 	rx_control_block_t *rcb;
1616*6621Sbt150084 	union ixgbe_adv_rx_desc	*rbd;
1617*6621Sbt150084 	uint32_t size;
1618*6621Sbt150084 	uint32_t buf_low;
1619*6621Sbt150084 	uint32_t buf_high;
1620*6621Sbt150084 	uint32_t reg_val;
1621*6621Sbt150084 	int i;
1622*6621Sbt150084 
1623*6621Sbt150084 	ASSERT(mutex_owned(&rx_ring->rx_lock));
1624*6621Sbt150084 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1625*6621Sbt150084 
1626*6621Sbt150084 	for (i = 0; i < ixgbe->rx_ring_size; i++) {
1627*6621Sbt150084 		rcb = rx_ring->work_list[i];
1628*6621Sbt150084 		rbd = &rx_ring->rbd_ring[i];
1629*6621Sbt150084 
1630*6621Sbt150084 		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
1631*6621Sbt150084 		rbd->read.hdr_addr = NULL;
1632*6621Sbt150084 	}
1633*6621Sbt150084 
1634*6621Sbt150084 	/*
1635*6621Sbt150084 	 * Initialize the length register
1636*6621Sbt150084 	 */
1637*6621Sbt150084 	size = rx_ring->ring_size * sizeof (union ixgbe_adv_rx_desc);
1638*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);
1639*6621Sbt150084 
1640*6621Sbt150084 	/*
1641*6621Sbt150084 	 * Initialize the base address registers
1642*6621Sbt150084 	 */
1643*6621Sbt150084 	buf_low = (uint32_t)rx_ring->rbd_area.dma_address;
1644*6621Sbt150084 	buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32);
1645*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
1646*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);
1647*6621Sbt150084 
1648*6621Sbt150084 	/*
1649*6621Sbt150084 	 * Setup head & tail pointers
1650*6621Sbt150084 	 */
1651*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_ring->ring_size - 1);
1652*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);
1653*6621Sbt150084 
1654*6621Sbt150084 	rx_ring->rbd_next = 0;
1655*6621Sbt150084 
1656*6621Sbt150084 	/*
1657*6621Sbt150084 	 * Note: Considering the case that the chipset is being reset
1658*6621Sbt150084 	 * and there are still some buffers held by the upper layer,
1659*6621Sbt150084 	 * we should not reset the values of rcb_head, rcb_tail and
1660*6621Sbt150084 	 * rcb_free if the state is not IXGBE_UNKNOWN.
1661*6621Sbt150084 	 */
1662*6621Sbt150084 	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
1663*6621Sbt150084 		rx_ring->rcb_head = 0;
1664*6621Sbt150084 		rx_ring->rcb_tail = 0;
1665*6621Sbt150084 		rx_ring->rcb_free = rx_ring->free_list_size;
1666*6621Sbt150084 	}
1667*6621Sbt150084 
1668*6621Sbt150084 	/*
1669*6621Sbt150084 	 * Setup the Receive Descriptor Control Register (RXDCTL)
1670*6621Sbt150084 	 * PTHRESH=32 descriptors (half the internal cache)
1671*6621Sbt150084 	 * HTHRESH=0 descriptors (to minimize latency on fetch)
1672*6621Sbt150084 	 * WTHRESH defaults to 1 (writeback each descriptor)
1673*6621Sbt150084 	 */
1674*6621Sbt150084 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
1675*6621Sbt150084 	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
1676*6621Sbt150084 	reg_val |= 0x0020;		/* pthresh */
1677*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);
1678*6621Sbt150084 
1679*6621Sbt150084 	/*
1680*6621Sbt150084 	 * Setup the Split and Replication Receive Control Register.
1681*6621Sbt150084 	 * Set the rx buffer size and the advanced descriptor type.
1682*6621Sbt150084 	 */
1683*6621Sbt150084 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
1684*6621Sbt150084 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1685*6621Sbt150084 
1686*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
1687*6621Sbt150084 }
1688*6621Sbt150084 
/*
 * ixgbe_setup_rx - Setup the receive unit.
 *
 * Programs global receive filtering (FCTRL), enables the receive unit
 * (RXCTRL), sets up every rx ring, then configures frame size, jumbo
 * frames, hardware checksum and (optionally) RSS.  Ordering between
 * these register writes is significant; see the comments inline.
 */
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t reg_val;
	int i;

	/*
	 * Set filter control in FCTRL to accept broadcast packets and do
	 * not pass pause frames to host.  Flow control settings are already
	 * in this register, so preserve them.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Enable the receive unit.  This must be done after filter
	 * control is set in FCTRL.
	 */
	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * The Max Frame Size in MHADD will be internally increased by four
	 * bytes if the packet has a VLAN field, so includes MTU, ethernet
	 * header and frame check sequence.
	 */
	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	if (ixgbe->default_mtu > ETHERMTU) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg_val |= IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
	}

	/*
	 * Hardware checksum settings.  Note that when RSS is enabled
	 * below, ixgbe_setup_rss() clears IPPCSE again: packet checksum
	 * and RSS are mutually exclusive on this hardware.
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup RSS for multiple receive queues
	 */
	if (ixgbe->num_rx_rings > 1)
		ixgbe_setup_rss(ixgbe);
}
1755*6621Sbt150084 
/*
 * ixgbe_setup_tx_ring - Setup a single tx ring.
 *
 * Programs descriptor ring length, base address and head/tail into the
 * hardware, enables the queue, optionally configures head write-back,
 * and initializes the software ring state.  Caller must hold both the
 * ring's tx_lock and the adapter gen_lock.
 */
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers (64-bit DMA address
	 * split into low/high 32-bit halves)
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * setup TXDCTL(tx_ring->index)
	 */
	reg_val = IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 * NOTE(review): this assumes tbd_area was allocated with
		 * at least one descriptor's worth of extra space past
		 * ring_size -- confirm against the allocation code.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 */
		reg_val = IXGBE_READ_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
	} else {
		/* No head write-back; recycling will read TDH instead */
		tx_ring->tbd_head_wb = NULL;
	}

	/* Descriptor ring starts empty */
	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/*
	 * Note: Considering the case that the chipset is being reset,
	 * and there are still some tcb in the pending list,
	 * we should not reset the values of tcb_head, tcb_tail and
	 * tcb_free if the state is not IXGBE_UNKNOWN.
	 */
	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize hardware checksum offload settings
	 */
	tx_ring->hcksum_context.hcksum_flags = 0;
	tx_ring->hcksum_context.ip_hdr_len = 0;
	tx_ring->hcksum_context.mac_hdr_len = 0;
	tx_ring->hcksum_context.l4_proto = 0;
}
1855*6621Sbt150084 
1856*6621Sbt150084 static void
1857*6621Sbt150084 ixgbe_setup_tx(ixgbe_t *ixgbe)
1858*6621Sbt150084 {
1859*6621Sbt150084 	ixgbe_tx_ring_t *tx_ring;
1860*6621Sbt150084 	int i;
1861*6621Sbt150084 
1862*6621Sbt150084 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1863*6621Sbt150084 		tx_ring = &ixgbe->tx_rings[i];
1864*6621Sbt150084 		ixgbe_setup_tx_ring(tx_ring);
1865*6621Sbt150084 	}
1866*6621Sbt150084 }
1867*6621Sbt150084 
/*
 * ixgbe_setup_rss - Setup receive-side scaling feature.
 *
 * Fills the redirection table and hash key, enables RSS hashing for
 * the supported packet types, and disables packet checksum (which is
 * mutually exclusive with RSS on this hardware).
 */
static void
ixgbe_setup_rss(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t i, j, mrqc, rxcsum;
	uint32_t random;
	uint32_t reta;

	/*
	 * Fill out redirection table.  Four 8-bit entries are packed
	 * into each 32-bit RETA register, so a register is written once
	 * every fourth iteration (when j wraps at 3), addressed by i >> 2.
	 * NOTE(review): each entry is j * 0x11, i.e. the same index in
	 * both nibbles, and the pattern spreads over exactly 4 queues
	 * regardless of num_rx_rings -- confirm this matches the
	 * intended queue count.
	 */
	j = 0;
	reta = 0;
	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (j * 0x11);
		if (j == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		j = ((j + 1) % 4);
	}

	/*
	 * Fill out hash function seeds with a random constant
	 * (10 x 32-bit RSSRK registers = 40-byte RSS key)
	 */
	for (i = 0; i < 10; i++) {
		(void) random_get_pseudo_bytes((uint8_t *)&random,
		    sizeof (uint32_t));
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
	}

	/*
	 * enable RSS & perform hash on these packet types
	 */
	mrqc = IXGBE_MRQC_RSSEN |
	    IXGBE_MRQC_RSS_FIELD_IPV4 |
	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
	    IXGBE_MRQC_RSS_FIELD_IPV6 |
	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	/*
	 * Disable Packet Checksum to enable RSS for multiple receive queues.
	 *
	 * It is an adapter hardware limitation that Packet Checksum is
	 * mutually exclusive with RSS.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}
1926*6621Sbt150084 
/*
 * ixgbe_init_unicst - Initialize the unicast addresses.
 *
 * On first initialization the software unicast table is built from the
 * hardware's default MAC; on a subsequent reset the table is restored
 * from software back into the RAR registers.
 */
static void
ixgbe_init_unicst(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot;
	/*
	 * Here we should consider two situations:
	 *
	 * 1. Chipset is initialized the first time
	 *    Initialize the multiple unicast addresses, and
	 *    save the default mac address.
	 *
	 * 2. Chipset is reset
	 *    Recover the multiple unicast addresses from the
	 *    software data structure to the RAR registers.
	 */
	if (!ixgbe->unicst_init) {
		/*
		 * Initialize the multiple unicast addresses
		 */
		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;

		/* Slot 0 is reserved for the default MAC address */
		ixgbe->unicst_avail = ixgbe->unicst_total - 1;

		bcopy(hw->mac.addr, ixgbe->unicst_addr[0].mac.addr,
		    ETHERADDRL);
		ixgbe->unicst_addr[0].mac.set = 1;

		/* All remaining slots start out unused */
		for (slot = 1; slot < ixgbe->unicst_total; slot++)
			ixgbe->unicst_addr[slot].mac.set = 0;

		ixgbe->unicst_init = B_TRUE;
	} else {
		/*
		 * Recover the default mac address
		 */
		bcopy(ixgbe->unicst_addr[0].mac.addr, hw->mac.addr,
		    ETHERADDRL);

		/*
		 * Re-configure the RAR registers.  The loop starts at
		 * slot 1: RAR0 presumably gets hw->mac.addr programmed
		 * by the common reset/init code -- TODO confirm.
		 */
		for (slot = 1; slot < ixgbe->unicst_total; slot++)
			(void) ixgbe_set_rar(hw, slot,
			    ixgbe->unicst_addr[slot].mac.addr, NULL, NULL);
	}
}
1975*6621Sbt150084 /*
1976*6621Sbt150084  * ixgbe_unicst_set - Set the unicast address to the specified slot.
1977*6621Sbt150084  */
1978*6621Sbt150084 int
1979*6621Sbt150084 ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
1980*6621Sbt150084     mac_addr_slot_t slot)
1981*6621Sbt150084 {
1982*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
1983*6621Sbt150084 
1984*6621Sbt150084 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1985*6621Sbt150084 
1986*6621Sbt150084 	/*
1987*6621Sbt150084 	 * Save the unicast address in the software data structure
1988*6621Sbt150084 	 */
1989*6621Sbt150084 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
1990*6621Sbt150084 
1991*6621Sbt150084 	/*
1992*6621Sbt150084 	 * Set the unicast address to the RAR register
1993*6621Sbt150084 	 */
1994*6621Sbt150084 	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, NULL);
1995*6621Sbt150084 
1996*6621Sbt150084 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1997*6621Sbt150084 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
1998*6621Sbt150084 		return (EIO);
1999*6621Sbt150084 	}
2000*6621Sbt150084 
2001*6621Sbt150084 	return (0);
2002*6621Sbt150084 }
2003*6621Sbt150084 
2004*6621Sbt150084 /*
2005*6621Sbt150084  * ixgbe_multicst_add - Add a multicst address.
2006*6621Sbt150084  */
2007*6621Sbt150084 int
2008*6621Sbt150084 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2009*6621Sbt150084 {
2010*6621Sbt150084 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2011*6621Sbt150084 
2012*6621Sbt150084 	if ((multiaddr[0] & 01) == 0) {
2013*6621Sbt150084 		return (EINVAL);
2014*6621Sbt150084 	}
2015*6621Sbt150084 
2016*6621Sbt150084 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2017*6621Sbt150084 		return (ENOENT);
2018*6621Sbt150084 	}
2019*6621Sbt150084 
2020*6621Sbt150084 	bcopy(multiaddr,
2021*6621Sbt150084 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2022*6621Sbt150084 	ixgbe->mcast_count++;
2023*6621Sbt150084 
2024*6621Sbt150084 	/*
2025*6621Sbt150084 	 * Update the multicast table in the hardware
2026*6621Sbt150084 	 */
2027*6621Sbt150084 	ixgbe_setup_multicst(ixgbe);
2028*6621Sbt150084 
2029*6621Sbt150084 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2030*6621Sbt150084 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2031*6621Sbt150084 		return (EIO);
2032*6621Sbt150084 	}
2033*6621Sbt150084 
2034*6621Sbt150084 	return (0);
2035*6621Sbt150084 }
2036*6621Sbt150084 
2037*6621Sbt150084 /*
2038*6621Sbt150084  * ixgbe_multicst_remove - Remove a multicst address.
2039*6621Sbt150084  */
2040*6621Sbt150084 int
2041*6621Sbt150084 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2042*6621Sbt150084 {
2043*6621Sbt150084 	int i;
2044*6621Sbt150084 
2045*6621Sbt150084 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2046*6621Sbt150084 
2047*6621Sbt150084 	for (i = 0; i < ixgbe->mcast_count; i++) {
2048*6621Sbt150084 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2049*6621Sbt150084 		    ETHERADDRL) == 0) {
2050*6621Sbt150084 			for (i++; i < ixgbe->mcast_count; i++) {
2051*6621Sbt150084 				ixgbe->mcast_table[i - 1] =
2052*6621Sbt150084 				    ixgbe->mcast_table[i];
2053*6621Sbt150084 			}
2054*6621Sbt150084 			ixgbe->mcast_count--;
2055*6621Sbt150084 			break;
2056*6621Sbt150084 		}
2057*6621Sbt150084 	}
2058*6621Sbt150084 
2059*6621Sbt150084 	/*
2060*6621Sbt150084 	 * Update the multicast table in the hardware
2061*6621Sbt150084 	 */
2062*6621Sbt150084 	ixgbe_setup_multicst(ixgbe);
2063*6621Sbt150084 
2064*6621Sbt150084 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2065*6621Sbt150084 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2066*6621Sbt150084 		return (EIO);
2067*6621Sbt150084 	}
2068*6621Sbt150084 
2069*6621Sbt150084 	return (0);
2070*6621Sbt150084 }
2071*6621Sbt150084 
2072*6621Sbt150084 /*
2073*6621Sbt150084  * ixgbe_setup_multicast - Setup multicast data structures.
2074*6621Sbt150084  *
2075*6621Sbt150084  * This routine initializes all of the multicast related structures
2076*6621Sbt150084  * and save them in the hardware registers.
2077*6621Sbt150084  */
2078*6621Sbt150084 static void
2079*6621Sbt150084 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2080*6621Sbt150084 {
2081*6621Sbt150084 	uint8_t *mc_addr_list;
2082*6621Sbt150084 	uint32_t mc_addr_count;
2083*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
2084*6621Sbt150084 
2085*6621Sbt150084 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2086*6621Sbt150084 
2087*6621Sbt150084 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2088*6621Sbt150084 
2089*6621Sbt150084 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2090*6621Sbt150084 	mc_addr_count = ixgbe->mcast_count;
2091*6621Sbt150084 
2092*6621Sbt150084 	/*
2093*6621Sbt150084 	 * Update the multicast addresses to the MTA registers
2094*6621Sbt150084 	 */
2095*6621Sbt150084 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2096*6621Sbt150084 	    ixgbe_mc_table_itr);
2097*6621Sbt150084 }
2098*6621Sbt150084 
/*
 * ixgbe_get_conf - Get driver configurations set in driver.conf.
 *
 * This routine gets user-configured values out of the configuration
 * file ixgbe.conf.
 *
 * For each configurable value, there is a minimum, a maximum, and a
 * default.
 * If user does not configure a value, use the default.
 * If user configures below the minimum, use the minimum.
 * If user configures above the maximum, use the maximum.
 */
static void
ixgbe_get_conf(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t flow_control;

	/*
	 * ixgbe driver supports the following user configurations:
	 *
	 * Jumbo frame configuration:
	 *    default_mtu
	 *
	 * Ethernet flow control configuration:
	 *    flow_control
	 *
	 * Multiple rings configurations:
	 *    tx_queue_number
	 *    tx_ring_size
	 *    rx_queue_number
	 *    rx_ring_size
	 *
	 * Call ixgbe_get_prop() to get the value for a specific
	 * configuration parameter.
	 */

	/*
	 * Jumbo frame configuration - max_frame_size controls host buffer
	 * allocation, so includes MTU, ethernet header, vlan tag and
	 * frame check sequence.
	 */
	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
	    MIN_MTU, MAX_MTU, DEFAULT_MTU);

	ixgbe->max_frame_size = ixgbe->default_mtu +
	    sizeof (struct ether_vlan_header) + ETHERFCSL;

	/*
	 * Ethernet flow control configuration.  Property values 0-2 map
	 * directly onto the ixgbe_fc_* enum; the value 3 is translated
	 * to "hardware default".
	 */
	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
	    ixgbe_fc_none, 3, ixgbe_fc_full);
	if (flow_control == 3)
		flow_control = ixgbe_fc_default;

	hw->fc.type = flow_control;

	/*
	 * Multiple rings configurations
	 */
	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
	    MIN_TX_QUEUE_NUM, MAX_TX_QUEUE_NUM, DEFAULT_TX_QUEUE_NUM);
	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);

	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
	    MIN_RX_QUEUE_NUM, MAX_RX_QUEUE_NUM, DEFAULT_RX_QUEUE_NUM);
	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);

	/*
	 * Tunable used to force an interrupt type. The only use is
	 * for testing of the lesser interrupt types.
	 * 0 = don't force interrupt type
	 * 1 = force interrupt type MSIX
	 * 2 = force interrupt type MSI
	 * 3 = force interrupt type Legacy
	 */
	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
	ixgbe_log(ixgbe, "interrupt force: %d\n", ixgbe->intr_force);

	/* Offload features: on by default except LSO */
	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
	    0, 1, 1);
	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
	    0, 1, 1);
	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
	    0, 1, 0);
	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
	    0, 1, 1);

	/* Transmit path thresholds */
	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
	    DEFAULT_TX_COPY_THRESHOLD);
	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);

	/* Receive path thresholds */
	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
	    DEFAULT_RX_COPY_THRESHOLD);
	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
	    DEFAULT_RX_LIMIT_PER_INTR);

	/* Interrupt throttling; only element 0 is configured here */
	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
	    DEFAULT_INTR_THROTTLING);
}
2215*6621Sbt150084 
2216*6621Sbt150084 /*
2217*6621Sbt150084  * ixgbe_get_prop - Get a property value out of the configuration file
2218*6621Sbt150084  * ixgbe.conf.
2219*6621Sbt150084  *
2220*6621Sbt150084  * Caller provides the name of the property, a default value, a minimum
2221*6621Sbt150084  * value, and a maximum value.
2222*6621Sbt150084  *
2223*6621Sbt150084  * Return configured value of the property, with default, minimum and
2224*6621Sbt150084  * maximum properly applied.
2225*6621Sbt150084  */
2226*6621Sbt150084 static int
2227*6621Sbt150084 ixgbe_get_prop(ixgbe_t *ixgbe,
2228*6621Sbt150084     char *propname,	/* name of the property */
2229*6621Sbt150084     int minval,		/* minimum acceptable value */
2230*6621Sbt150084     int maxval,		/* maximim acceptable value */
2231*6621Sbt150084     int defval)		/* default value */
2232*6621Sbt150084 {
2233*6621Sbt150084 	int value;
2234*6621Sbt150084 
2235*6621Sbt150084 	/*
2236*6621Sbt150084 	 * Call ddi_prop_get_int() to read the conf settings
2237*6621Sbt150084 	 */
2238*6621Sbt150084 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
2239*6621Sbt150084 	    DDI_PROP_DONTPASS, propname, defval);
2240*6621Sbt150084 	if (value > maxval)
2241*6621Sbt150084 		value = maxval;
2242*6621Sbt150084 
2243*6621Sbt150084 	if (value < minval)
2244*6621Sbt150084 		value = minval;
2245*6621Sbt150084 
2246*6621Sbt150084 	return (value);
2247*6621Sbt150084 }
2248*6621Sbt150084 
2249*6621Sbt150084 /*
2250*6621Sbt150084  * ixgbe_driver_setup_link - Using the link properties to setup the link.
2251*6621Sbt150084  */
2252*6621Sbt150084 int
2253*6621Sbt150084 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
2254*6621Sbt150084 {
2255*6621Sbt150084 	struct ixgbe_mac_info *mac;
2256*6621Sbt150084 	struct ixgbe_phy_info *phy;
2257*6621Sbt150084 	boolean_t invalid;
2258*6621Sbt150084 
2259*6621Sbt150084 	mac = &ixgbe->hw.mac;
2260*6621Sbt150084 	phy = &ixgbe->hw.phy;
2261*6621Sbt150084 	invalid = B_FALSE;
2262*6621Sbt150084 
2263*6621Sbt150084 	if (ixgbe->param_adv_autoneg_cap == 1) {
2264*6621Sbt150084 		mac->autoneg = B_TRUE;
2265*6621Sbt150084 		phy->autoneg_advertised = 0;
2266*6621Sbt150084 
2267*6621Sbt150084 		/*
2268*6621Sbt150084 		 * No half duplex support with 10Gb parts
2269*6621Sbt150084 		 */
2270*6621Sbt150084 		if (ixgbe->param_adv_10000fdx_cap == 1)
2271*6621Sbt150084 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2272*6621Sbt150084 
2273*6621Sbt150084 		if (ixgbe->param_adv_1000fdx_cap == 1)
2274*6621Sbt150084 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2275*6621Sbt150084 
2276*6621Sbt150084 		if (ixgbe->param_adv_100fdx_cap == 1)
2277*6621Sbt150084 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2278*6621Sbt150084 
2279*6621Sbt150084 		if (phy->autoneg_advertised == 0)
2280*6621Sbt150084 			invalid = B_TRUE;
2281*6621Sbt150084 	} else {
2282*6621Sbt150084 		ixgbe->hw.mac.autoneg = B_FALSE;
2283*6621Sbt150084 	}
2284*6621Sbt150084 
2285*6621Sbt150084 	if (invalid) {
2286*6621Sbt150084 		ixgbe_notice(ixgbe, "Invalid link settings. Setup link to "
2287*6621Sbt150084 		    "autonegotiation with full link capabilities.");
2288*6621Sbt150084 		ixgbe->hw.mac.autoneg = B_TRUE;
2289*6621Sbt150084 	}
2290*6621Sbt150084 
2291*6621Sbt150084 	if (setup_hw) {
2292*6621Sbt150084 		if (ixgbe_setup_link(&ixgbe->hw) != IXGBE_SUCCESS)
2293*6621Sbt150084 			return (IXGBE_FAILURE);
2294*6621Sbt150084 	}
2295*6621Sbt150084 
2296*6621Sbt150084 	return (IXGBE_SUCCESS);
2297*6621Sbt150084 }
2298*6621Sbt150084 
2299*6621Sbt150084 /*
2300*6621Sbt150084  * ixgbe_driver_link_check - Link status processing.
2301*6621Sbt150084  */
2302*6621Sbt150084 static boolean_t
2303*6621Sbt150084 ixgbe_driver_link_check(ixgbe_t *ixgbe)
2304*6621Sbt150084 {
2305*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
2306*6621Sbt150084 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
2307*6621Sbt150084 	boolean_t link_up = B_FALSE;
2308*6621Sbt150084 	boolean_t link_changed = B_FALSE;
2309*6621Sbt150084 
2310*6621Sbt150084 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2311*6621Sbt150084 
2312*6621Sbt150084 	(void) ixgbe_check_link(hw, &speed, &link_up);
2313*6621Sbt150084 	if (link_up) {
2314*6621Sbt150084 		/*
2315*6621Sbt150084 		 * The Link is up, check whether it was marked as down earlier
2316*6621Sbt150084 		 */
2317*6621Sbt150084 		if (ixgbe->link_state != LINK_STATE_UP) {
2318*6621Sbt150084 			switch (speed) {
2319*6621Sbt150084 				case IXGBE_LINK_SPEED_10GB_FULL:
2320*6621Sbt150084 					ixgbe->link_speed = SPEED_10GB;
2321*6621Sbt150084 					break;
2322*6621Sbt150084 				case IXGBE_LINK_SPEED_1GB_FULL:
2323*6621Sbt150084 					ixgbe->link_speed = SPEED_1GB;
2324*6621Sbt150084 					break;
2325*6621Sbt150084 				case IXGBE_LINK_SPEED_100_FULL:
2326*6621Sbt150084 					ixgbe->link_speed = SPEED_100;
2327*6621Sbt150084 			}
2328*6621Sbt150084 			ixgbe->link_duplex = LINK_DUPLEX_FULL;
2329*6621Sbt150084 			ixgbe->link_state = LINK_STATE_UP;
2330*6621Sbt150084 			ixgbe->link_down_timeout = 0;
2331*6621Sbt150084 			link_changed = B_TRUE;
2332*6621Sbt150084 		}
2333*6621Sbt150084 	} else {
2334*6621Sbt150084 		if (ixgbe->link_state != LINK_STATE_DOWN) {
2335*6621Sbt150084 			ixgbe->link_speed = 0;
2336*6621Sbt150084 			ixgbe->link_duplex = 0;
2337*6621Sbt150084 			ixgbe->link_state = LINK_STATE_DOWN;
2338*6621Sbt150084 			link_changed = B_TRUE;
2339*6621Sbt150084 		}
2340*6621Sbt150084 
2341*6621Sbt150084 		if (ixgbe->ixgbe_state & IXGBE_STARTED) {
2342*6621Sbt150084 			if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) {
2343*6621Sbt150084 				ixgbe->link_down_timeout++;
2344*6621Sbt150084 			} else if (ixgbe->link_down_timeout ==
2345*6621Sbt150084 			    MAX_LINK_DOWN_TIMEOUT) {
2346*6621Sbt150084 				ixgbe_tx_clean(ixgbe);
2347*6621Sbt150084 				ixgbe->link_down_timeout++;
2348*6621Sbt150084 			}
2349*6621Sbt150084 		}
2350*6621Sbt150084 	}
2351*6621Sbt150084 
2352*6621Sbt150084 	return (link_changed);
2353*6621Sbt150084 }
2354*6621Sbt150084 
2355*6621Sbt150084 /*
2356*6621Sbt150084  * ixgbe_local_timer - Driver watchdog function.
2357*6621Sbt150084  *
2358*6621Sbt150084  * This function will handle the transmit stall check, link status check and
2359*6621Sbt150084  * other routines.
2360*6621Sbt150084  */
2361*6621Sbt150084 static void
2362*6621Sbt150084 ixgbe_local_timer(void *arg)
2363*6621Sbt150084 {
2364*6621Sbt150084 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2365*6621Sbt150084 
2366*6621Sbt150084 	if (ixgbe_stall_check(ixgbe)) {
2367*6621Sbt150084 		ixgbe->reset_count++;
2368*6621Sbt150084 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2369*6621Sbt150084 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2370*6621Sbt150084 	}
2371*6621Sbt150084 
2372*6621Sbt150084 	ixgbe_restart_watchdog_timer(ixgbe);
2373*6621Sbt150084 }
2374*6621Sbt150084 
2375*6621Sbt150084 /*
2376*6621Sbt150084  * ixgbe_stall_check - Check for transmit stall.
2377*6621Sbt150084  *
2378*6621Sbt150084  * This function checks if the adapter is stalled (in transmit).
2379*6621Sbt150084  *
2380*6621Sbt150084  * It is called each time the watchdog timeout is invoked.
2381*6621Sbt150084  * If the transmit descriptor reclaim continuously fails,
2382*6621Sbt150084  * the watchdog value will increment by 1. If the watchdog
2383*6621Sbt150084  * value exceeds the threshold, the ixgbe is assumed to
2384*6621Sbt150084  * have stalled and need to be reset.
2385*6621Sbt150084  */
2386*6621Sbt150084 static boolean_t
2387*6621Sbt150084 ixgbe_stall_check(ixgbe_t *ixgbe)
2388*6621Sbt150084 {
2389*6621Sbt150084 	ixgbe_tx_ring_t *tx_ring;
2390*6621Sbt150084 	boolean_t result;
2391*6621Sbt150084 	int i;
2392*6621Sbt150084 
2393*6621Sbt150084 	if (ixgbe->link_state != LINK_STATE_UP)
2394*6621Sbt150084 		return (B_FALSE);
2395*6621Sbt150084 
2396*6621Sbt150084 	/*
2397*6621Sbt150084 	 * If any tx ring is stalled, we'll reset the chipset
2398*6621Sbt150084 	 */
2399*6621Sbt150084 	result = B_FALSE;
2400*6621Sbt150084 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2401*6621Sbt150084 		tx_ring = &ixgbe->tx_rings[i];
2402*6621Sbt150084 
2403*6621Sbt150084 		if (tx_ring->recycle_fail > 0)
2404*6621Sbt150084 			tx_ring->stall_watchdog++;
2405*6621Sbt150084 		else
2406*6621Sbt150084 			tx_ring->stall_watchdog = 0;
2407*6621Sbt150084 
2408*6621Sbt150084 		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
2409*6621Sbt150084 			result = B_TRUE;
2410*6621Sbt150084 			break;
2411*6621Sbt150084 		}
2412*6621Sbt150084 	}
2413*6621Sbt150084 
2414*6621Sbt150084 	if (result) {
2415*6621Sbt150084 		tx_ring->stall_watchdog = 0;
2416*6621Sbt150084 		tx_ring->recycle_fail = 0;
2417*6621Sbt150084 	}
2418*6621Sbt150084 
2419*6621Sbt150084 	return (result);
2420*6621Sbt150084 }
2421*6621Sbt150084 
2422*6621Sbt150084 
2423*6621Sbt150084 /*
2424*6621Sbt150084  * is_valid_mac_addr - Check if the mac address is valid.
2425*6621Sbt150084  */
2426*6621Sbt150084 static boolean_t
2427*6621Sbt150084 is_valid_mac_addr(uint8_t *mac_addr)
2428*6621Sbt150084 {
2429*6621Sbt150084 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
2430*6621Sbt150084 	const uint8_t addr_test2[6] =
2431*6621Sbt150084 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2432*6621Sbt150084 
2433*6621Sbt150084 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
2434*6621Sbt150084 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
2435*6621Sbt150084 		return (B_FALSE);
2436*6621Sbt150084 
2437*6621Sbt150084 	return (B_TRUE);
2438*6621Sbt150084 }
2439*6621Sbt150084 
/*
 * ixgbe_find_mac_address - Look for a firmware override of the MAC address.
 *
 * On SPARC, OBP properties may override the chip's factory-set address.
 * Three properties are checked in increasing order of precedence (a later
 * match overwrites an earlier one): "local-mac-address", the
 * "local-mac-address?" switch (which selects the system address), and
 * finally "mac-address".  Any address found is copied into both
 * hw->mac.addr and hw->mac.perm_addr.  On non-SPARC platforms this is a
 * no-op.  Always returns B_TRUE.
 */
static boolean_t
ixgbe_find_mac_address(ixgbe_t *ixgbe)
{
#ifdef __sparc
	struct ixgbe_hw *hw = &ixgbe->hw;
	uchar_t *bytes;
	struct ether_addr sysaddr;
	uint_t nelts;
	int err;
	boolean_t found = B_FALSE;

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.
	 *
	 * We check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it).  If we can't
	 * make sense of it this way, we'll ignore it.
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		/* prop data is allocated by DDI; release it */
		ddi_prop_free(bytes);
	}

	/*
	 * Look up the OBP property "local-mac-address?". If the user has set
	 * 'local-mac-address? = false', use "the system address" instead.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
			if (localetheraddr(NULL, &sysaddr) != 0) {
				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
				found = B_TRUE;
			}
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/* mirror the chosen address into the "permanent" address slot */
	if (found) {
		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
		return (B_TRUE);
	}
#else
	_NOTE(ARGUNUSED(ixgbe));
#endif

	return (B_TRUE);
}
2513*6621Sbt150084 
2514*6621Sbt150084 #pragma inline(ixgbe_arm_watchdog_timer)
2515*6621Sbt150084 static void
2516*6621Sbt150084 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
2517*6621Sbt150084 {
2518*6621Sbt150084 	/*
2519*6621Sbt150084 	 * Fire a watchdog timer
2520*6621Sbt150084 	 */
2521*6621Sbt150084 	ixgbe->watchdog_tid =
2522*6621Sbt150084 	    timeout(ixgbe_local_timer,
2523*6621Sbt150084 	    (void *)ixgbe, 1 * drv_usectohz(1000000));
2524*6621Sbt150084 
2525*6621Sbt150084 }
2526*6621Sbt150084 
2527*6621Sbt150084 /*
2528*6621Sbt150084  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
2529*6621Sbt150084  */
2530*6621Sbt150084 void
2531*6621Sbt150084 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
2532*6621Sbt150084 {
2533*6621Sbt150084 	mutex_enter(&ixgbe->watchdog_lock);
2534*6621Sbt150084 
2535*6621Sbt150084 	if (!ixgbe->watchdog_enable) {
2536*6621Sbt150084 		ixgbe->watchdog_enable = B_TRUE;
2537*6621Sbt150084 		ixgbe->watchdog_start = B_TRUE;
2538*6621Sbt150084 		ixgbe_arm_watchdog_timer(ixgbe);
2539*6621Sbt150084 	}
2540*6621Sbt150084 
2541*6621Sbt150084 	mutex_exit(&ixgbe->watchdog_lock);
2542*6621Sbt150084 }
2543*6621Sbt150084 
2544*6621Sbt150084 /*
2545*6621Sbt150084  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
2546*6621Sbt150084  */
2547*6621Sbt150084 void
2548*6621Sbt150084 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
2549*6621Sbt150084 {
2550*6621Sbt150084 	timeout_id_t tid;
2551*6621Sbt150084 
2552*6621Sbt150084 	mutex_enter(&ixgbe->watchdog_lock);
2553*6621Sbt150084 
2554*6621Sbt150084 	ixgbe->watchdog_enable = B_FALSE;
2555*6621Sbt150084 	ixgbe->watchdog_start = B_FALSE;
2556*6621Sbt150084 	tid = ixgbe->watchdog_tid;
2557*6621Sbt150084 	ixgbe->watchdog_tid = 0;
2558*6621Sbt150084 
2559*6621Sbt150084 	mutex_exit(&ixgbe->watchdog_lock);
2560*6621Sbt150084 
2561*6621Sbt150084 	if (tid != 0)
2562*6621Sbt150084 		(void) untimeout(tid);
2563*6621Sbt150084 }
2564*6621Sbt150084 
2565*6621Sbt150084 /*
2566*6621Sbt150084  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
2567*6621Sbt150084  */
2568*6621Sbt150084 static void
2569*6621Sbt150084 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
2570*6621Sbt150084 {
2571*6621Sbt150084 	mutex_enter(&ixgbe->watchdog_lock);
2572*6621Sbt150084 
2573*6621Sbt150084 	if (ixgbe->watchdog_enable) {
2574*6621Sbt150084 		if (!ixgbe->watchdog_start) {
2575*6621Sbt150084 			ixgbe->watchdog_start = B_TRUE;
2576*6621Sbt150084 			ixgbe_arm_watchdog_timer(ixgbe);
2577*6621Sbt150084 		}
2578*6621Sbt150084 	}
2579*6621Sbt150084 
2580*6621Sbt150084 	mutex_exit(&ixgbe->watchdog_lock);
2581*6621Sbt150084 }
2582*6621Sbt150084 
2583*6621Sbt150084 /*
2584*6621Sbt150084  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
2585*6621Sbt150084  */
2586*6621Sbt150084 static void
2587*6621Sbt150084 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
2588*6621Sbt150084 {
2589*6621Sbt150084 	mutex_enter(&ixgbe->watchdog_lock);
2590*6621Sbt150084 
2591*6621Sbt150084 	if (ixgbe->watchdog_start)
2592*6621Sbt150084 		ixgbe_arm_watchdog_timer(ixgbe);
2593*6621Sbt150084 
2594*6621Sbt150084 	mutex_exit(&ixgbe->watchdog_lock);
2595*6621Sbt150084 }
2596*6621Sbt150084 
2597*6621Sbt150084 /*
2598*6621Sbt150084  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
2599*6621Sbt150084  */
2600*6621Sbt150084 static void
2601*6621Sbt150084 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
2602*6621Sbt150084 {
2603*6621Sbt150084 	timeout_id_t tid;
2604*6621Sbt150084 
2605*6621Sbt150084 	mutex_enter(&ixgbe->watchdog_lock);
2606*6621Sbt150084 
2607*6621Sbt150084 	ixgbe->watchdog_start = B_FALSE;
2608*6621Sbt150084 	tid = ixgbe->watchdog_tid;
2609*6621Sbt150084 	ixgbe->watchdog_tid = 0;
2610*6621Sbt150084 
2611*6621Sbt150084 	mutex_exit(&ixgbe->watchdog_lock);
2612*6621Sbt150084 
2613*6621Sbt150084 	if (tid != 0)
2614*6621Sbt150084 		(void) untimeout(tid);
2615*6621Sbt150084 }
2616*6621Sbt150084 
2617*6621Sbt150084 /*
2618*6621Sbt150084  * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
2619*6621Sbt150084  */
2620*6621Sbt150084 static void
2621*6621Sbt150084 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
2622*6621Sbt150084 {
2623*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
2624*6621Sbt150084 
2625*6621Sbt150084 	/*
2626*6621Sbt150084 	 * mask all interrupts off
2627*6621Sbt150084 	 */
2628*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
2629*6621Sbt150084 
2630*6621Sbt150084 	/*
2631*6621Sbt150084 	 * for MSI-X, also disable autoclear
2632*6621Sbt150084 	 */
2633*6621Sbt150084 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2634*6621Sbt150084 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
2635*6621Sbt150084 	}
2636*6621Sbt150084 
2637*6621Sbt150084 	IXGBE_WRITE_FLUSH(hw);
2638*6621Sbt150084 }
2639*6621Sbt150084 
2640*6621Sbt150084 /*
2641*6621Sbt150084  * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
2642*6621Sbt150084  */
2643*6621Sbt150084 static void
2644*6621Sbt150084 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
2645*6621Sbt150084 {
2646*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
2647*6621Sbt150084 	uint32_t eims, eiac, gpie;
2648*6621Sbt150084 
2649*6621Sbt150084 	gpie = 0;
2650*6621Sbt150084 	eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
2651*6621Sbt150084 	eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
2652*6621Sbt150084 
2653*6621Sbt150084 	/*
2654*6621Sbt150084 	 * msi-x mode
2655*6621Sbt150084 	 */
2656*6621Sbt150084 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2657*6621Sbt150084 		/* enable autoclear but not on bits 29:20 */
2658*6621Sbt150084 		eiac = (eims & ~0x3ff00000);
2659*6621Sbt150084 
2660*6621Sbt150084 		/* general purpose interrupt enable */
2661*6621Sbt150084 		gpie |= (IXGBE_GPIE_MSIX_MODE |
2662*6621Sbt150084 		    IXGBE_GPIE_PBA_SUPPORT |IXGBE_GPIE_OCD);
2663*6621Sbt150084 	/*
2664*6621Sbt150084 	 * non-msi-x mode
2665*6621Sbt150084 	 */
2666*6621Sbt150084 	} else {
2667*6621Sbt150084 
2668*6621Sbt150084 		/* disable autoclear, leave gpie at default */
2669*6621Sbt150084 		eiac = 0;
2670*6621Sbt150084 	}
2671*6621Sbt150084 
2672*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims);
2673*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
2674*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2675*6621Sbt150084 	IXGBE_WRITE_FLUSH(hw);
2676*6621Sbt150084 }
2677*6621Sbt150084 
/*
 * ixgbe_loopback_ioctl - Loopback support.
 *
 * Handles the LB_* ioctls.  The payload lives in mp->b_cont; each case
 * validates iocp->ioc_count against the expected payload size before
 * touching the buffer, and "size" is the number of payload bytes to be
 * returned to the caller (written back into ioc_count on success).
 */
enum ioc_reply
ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	uint32_t size;
	uint32_t value;

	/* a payload block is required for every supported command */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	switch (iocp->ioc_cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		/* report the byte size of the mode table from LB_GET_INFO */
		size = sizeof (lb_info_sz_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = sizeof (lb_normal);
		value += sizeof (lb_mac);

		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbsp = value;
		break;

	case LB_GET_INFO:
		/* the caller's buffer must hold the full mode table */
		value = sizeof (lb_normal);
		value += sizeof (lb_mac);

		size = value;
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		/* reuse "value" as the output array index */
		value = 0;
		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;

		lbpp[value++] = lb_normal;
		lbpp[value++] = lb_mac;
		break;

	case LB_GET_MODE:
		/* return the currently configured loopback mode */
		size = sizeof (uint32_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbmp = ixgbe->loopback_mode;
		break;

	case LB_SET_MODE:
		/* set a new loopback mode; no payload is returned */
		size = 0;
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
			return (IOC_INVAL);
		break;
	}

	iocp->ioc_count = size;
	iocp->ioc_error = 0;

	/* verify register accesses above did not trip an FMA error */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (IOC_INVAL);
	}

	return (IOC_REPLY);
}
2754*6621Sbt150084 
2755*6621Sbt150084 /*
2756*6621Sbt150084  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
2757*6621Sbt150084  */
2758*6621Sbt150084 static boolean_t
2759*6621Sbt150084 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
2760*6621Sbt150084 {
2761*6621Sbt150084 	struct ixgbe_hw *hw;
2762*6621Sbt150084 
2763*6621Sbt150084 	if (mode == ixgbe->loopback_mode)
2764*6621Sbt150084 		return (B_TRUE);
2765*6621Sbt150084 
2766*6621Sbt150084 	hw = &ixgbe->hw;
2767*6621Sbt150084 
2768*6621Sbt150084 	ixgbe->loopback_mode = mode;
2769*6621Sbt150084 
2770*6621Sbt150084 	if (mode == IXGBE_LB_NONE) {
2771*6621Sbt150084 		/*
2772*6621Sbt150084 		 * Reset the chip
2773*6621Sbt150084 		 */
2774*6621Sbt150084 		hw->phy.autoneg_wait_to_complete = B_TRUE;
2775*6621Sbt150084 		(void) ixgbe_reset(ixgbe);
2776*6621Sbt150084 		hw->phy.autoneg_wait_to_complete = B_FALSE;
2777*6621Sbt150084 		return (B_TRUE);
2778*6621Sbt150084 	}
2779*6621Sbt150084 
2780*6621Sbt150084 	mutex_enter(&ixgbe->gen_lock);
2781*6621Sbt150084 
2782*6621Sbt150084 	switch (mode) {
2783*6621Sbt150084 	default:
2784*6621Sbt150084 		mutex_exit(&ixgbe->gen_lock);
2785*6621Sbt150084 		return (B_FALSE);
2786*6621Sbt150084 
2787*6621Sbt150084 	case IXGBE_LB_INTERNAL_MAC:
2788*6621Sbt150084 		ixgbe_set_internal_mac_loopback(ixgbe);
2789*6621Sbt150084 		break;
2790*6621Sbt150084 	}
2791*6621Sbt150084 
2792*6621Sbt150084 	mutex_exit(&ixgbe->gen_lock);
2793*6621Sbt150084 
2794*6621Sbt150084 	return (B_TRUE);
2795*6621Sbt150084 }
2796*6621Sbt150084 
2797*6621Sbt150084 /*
2798*6621Sbt150084  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
2799*6621Sbt150084  */
2800*6621Sbt150084 static void
2801*6621Sbt150084 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
2802*6621Sbt150084 {
2803*6621Sbt150084 	struct ixgbe_hw *hw;
2804*6621Sbt150084 	uint32_t reg;
2805*6621Sbt150084 	uint8_t atlas;
2806*6621Sbt150084 
2807*6621Sbt150084 	hw = &ixgbe->hw;
2808*6621Sbt150084 
2809*6621Sbt150084 	/*
2810*6621Sbt150084 	 * Setup MAC loopback
2811*6621Sbt150084 	 */
2812*6621Sbt150084 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
2813*6621Sbt150084 	reg |= IXGBE_HLREG0_LPBK;
2814*6621Sbt150084 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
2815*6621Sbt150084 
2816*6621Sbt150084 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
2817*6621Sbt150084 	reg &= ~IXGBE_AUTOC_LMS_MASK;
2818*6621Sbt150084 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
2819*6621Sbt150084 
2820*6621Sbt150084 	/*
2821*6621Sbt150084 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
2822*6621Sbt150084 	 */
2823*6621Sbt150084 	if (hw->mac.type == ixgbe_mac_82598EB) {
2824*6621Sbt150084 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
2825*6621Sbt150084 		    &atlas);
2826*6621Sbt150084 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
2827*6621Sbt150084 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
2828*6621Sbt150084 		    atlas);
2829*6621Sbt150084 
2830*6621Sbt150084 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
2831*6621Sbt150084 		    &atlas);
2832*6621Sbt150084 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
2833*6621Sbt150084 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
2834*6621Sbt150084 		    atlas);
2835*6621Sbt150084 
2836*6621Sbt150084 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
2837*6621Sbt150084 		    &atlas);
2838*6621Sbt150084 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
2839*6621Sbt150084 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
2840*6621Sbt150084 		    atlas);
2841*6621Sbt150084 
2842*6621Sbt150084 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
2843*6621Sbt150084 		    &atlas);
2844*6621Sbt150084 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
2845*6621Sbt150084 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
2846*6621Sbt150084 		    atlas);
2847*6621Sbt150084 	}
2848*6621Sbt150084 }
2849*6621Sbt150084 
2850*6621Sbt150084 #pragma inline(ixgbe_intr_rx_work)
2851*6621Sbt150084 /*
2852*6621Sbt150084  * ixgbe_intr_rx_work - RX processing of ISR.
2853*6621Sbt150084  */
2854*6621Sbt150084 static void
2855*6621Sbt150084 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
2856*6621Sbt150084 {
2857*6621Sbt150084 	mblk_t *mp;
2858*6621Sbt150084 
2859*6621Sbt150084 	mutex_enter(&rx_ring->rx_lock);
2860*6621Sbt150084 
2861*6621Sbt150084 	mp = ixgbe_rx(rx_ring);
2862*6621Sbt150084 	mutex_exit(&rx_ring->rx_lock);
2863*6621Sbt150084 
2864*6621Sbt150084 	if (mp != NULL)
2865*6621Sbt150084 		mac_rx(rx_ring->ixgbe->mac_hdl, NULL, mp);
2866*6621Sbt150084 }
2867*6621Sbt150084 
2868*6621Sbt150084 #pragma inline(ixgbe_intr_tx_work)
2869*6621Sbt150084 /*
2870*6621Sbt150084  * ixgbe_intr_tx_work - TX processing of ISR.
2871*6621Sbt150084  */
2872*6621Sbt150084 static void
2873*6621Sbt150084 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
2874*6621Sbt150084 {
2875*6621Sbt150084 	/*
2876*6621Sbt150084 	 * Recycle the tx descriptors
2877*6621Sbt150084 	 */
2878*6621Sbt150084 	tx_ring->tx_recycle(tx_ring);
2879*6621Sbt150084 
2880*6621Sbt150084 	/*
2881*6621Sbt150084 	 * Schedule the re-transmit
2882*6621Sbt150084 	 */
2883*6621Sbt150084 	if (tx_ring->reschedule &&
2884*6621Sbt150084 	    (tx_ring->tbd_free >= tx_ring->resched_thresh)) {
2885*6621Sbt150084 		tx_ring->reschedule = B_FALSE;
2886*6621Sbt150084 		mac_tx_update(tx_ring->ixgbe->mac_hdl);
2887*6621Sbt150084 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
2888*6621Sbt150084 	}
2889*6621Sbt150084 }
2890*6621Sbt150084 
2891*6621Sbt150084 #pragma inline(ixgbe_intr_other_work)
2892*6621Sbt150084 /*
2893*6621Sbt150084  * ixgbe_intr_other_work - Other processing of ISR.
2894*6621Sbt150084  */
2895*6621Sbt150084 static void
2896*6621Sbt150084 ixgbe_intr_other_work(ixgbe_t *ixgbe)
2897*6621Sbt150084 {
2898*6621Sbt150084 	boolean_t link_changed;
2899*6621Sbt150084 
2900*6621Sbt150084 	ixgbe_stop_watchdog_timer(ixgbe);
2901*6621Sbt150084 
2902*6621Sbt150084 	mutex_enter(&ixgbe->gen_lock);
2903*6621Sbt150084 
2904*6621Sbt150084 	/*
2905*6621Sbt150084 	 * Take care of link status change
2906*6621Sbt150084 	 */
2907*6621Sbt150084 	link_changed = ixgbe_driver_link_check(ixgbe);
2908*6621Sbt150084 
2909*6621Sbt150084 	/*
2910*6621Sbt150084 	 * Get new phy state
2911*6621Sbt150084 	 */
2912*6621Sbt150084 	ixgbe_get_hw_state(ixgbe);
2913*6621Sbt150084 
2914*6621Sbt150084 	mutex_exit(&ixgbe->gen_lock);
2915*6621Sbt150084 
2916*6621Sbt150084 	if (link_changed)
2917*6621Sbt150084 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
2918*6621Sbt150084 
2919*6621Sbt150084 	ixgbe_start_watchdog_timer(ixgbe);
2920*6621Sbt150084 }
2921*6621Sbt150084 
/*
 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
 *
 * All interrupt work is gathered while gen_lock is held; the upcalls
 * into the MAC layer (mac_rx, mac_tx_update, mac_link_update) are
 * deferred until after the lock is dropped.
 */
static uint_t
ixgbe_intr_legacy(void *arg1, void *arg2)
{
	_NOTE(ARGUNUSED(arg2));
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t eicr;
	mblk_t *mp;
	boolean_t tx_reschedule;
	boolean_t link_changed;
	uint_t result;


	mutex_enter(&ixgbe->gen_lock);

	/* don't claim interrupts while the device is suspended */
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	mp = NULL;
	tx_reschedule = B_FALSE;
	link_changed = B_FALSE;

	/*
	 * Any bit set in eicr: claim this interrupt
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (eicr) {
		/*
		 * For legacy interrupt, we have only one interrupt,
		 * so we have only one rx ring and one tx ring enabled.
		 */
		ASSERT(ixgbe->num_rx_rings == 1);
		ASSERT(ixgbe->num_tx_rings == 1);

		/*
		 * For legacy interrupt, we can't differentiate
		 * between tx and rx, so always clean both
		 */
		if (eicr & IXGBE_EICR_RTX_QUEUE) {

			/*
			 * Clean the rx descriptors
			 */
			mp = ixgbe_rx(&ixgbe->rx_rings[0]);

			/*
			 * Recycle the tx descriptors
			 */
			tx_ring = &ixgbe->tx_rings[0];
			tx_ring->tx_recycle(tx_ring);

			/*
			 * Schedule the re-transmit
			 *
			 * Note: tx_ring is only dereferenced after
			 * gen_lock is dropped when tx_reschedule is
			 * B_TRUE, which implies it was assigned here.
			 */
			tx_reschedule = (tx_ring->reschedule &&
			    (tx_ring->tbd_free >= tx_ring->resched_thresh));
		}

		if (eicr & IXGBE_EICR_LSC) {

			/* take care of link status change */
			link_changed = ixgbe_driver_link_check(ixgbe);

			/* Get new phy state */
			ixgbe_get_hw_state(ixgbe);
		}

		result = DDI_INTR_CLAIMED;
	} else {
		/*
		 * No interrupt cause bits set: don't claim this interrupt.
		 */
		result = DDI_INTR_UNCLAIMED;
	}

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Do the following work outside of the gen_lock
	 */
	if (mp != NULL)
		mac_rx(ixgbe->mac_hdl, NULL, mp);

	if (tx_reschedule)  {
		tx_ring->reschedule = B_FALSE;
		mac_tx_update(ixgbe->mac_hdl);
		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
	}

	if (link_changed)
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);

	return (result);
}
3022*6621Sbt150084 
3023*6621Sbt150084 /*
3024*6621Sbt150084  * ixgbe_intr_msi - Interrupt handler for MSI.
3025*6621Sbt150084  */
3026*6621Sbt150084 static uint_t
3027*6621Sbt150084 ixgbe_intr_msi(void *arg1, void *arg2)
3028*6621Sbt150084 {
3029*6621Sbt150084 	_NOTE(ARGUNUSED(arg2));
3030*6621Sbt150084 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3031*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
3032*6621Sbt150084 	uint32_t eicr;
3033*6621Sbt150084 
3034*6621Sbt150084 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3035*6621Sbt150084 
3036*6621Sbt150084 	/*
3037*6621Sbt150084 	 * For MSI interrupt, we have only one vector,
3038*6621Sbt150084 	 * so we have only one rx ring and one tx ring enabled.
3039*6621Sbt150084 	 */
3040*6621Sbt150084 	ASSERT(ixgbe->num_rx_rings == 1);
3041*6621Sbt150084 	ASSERT(ixgbe->num_tx_rings == 1);
3042*6621Sbt150084 
3043*6621Sbt150084 	/*
3044*6621Sbt150084 	 * For MSI interrupt, we can't differentiate
3045*6621Sbt150084 	 * between tx and rx, so always clean both.
3046*6621Sbt150084 	 */
3047*6621Sbt150084 	if (eicr & IXGBE_EICR_RTX_QUEUE) {
3048*6621Sbt150084 		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
3049*6621Sbt150084 		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3050*6621Sbt150084 	}
3051*6621Sbt150084 
3052*6621Sbt150084 	if (eicr & IXGBE_EICR_LSC) {
3053*6621Sbt150084 		ixgbe_intr_other_work(ixgbe);
3054*6621Sbt150084 	}
3055*6621Sbt150084 
3056*6621Sbt150084 	return (DDI_INTR_CLAIMED);
3057*6621Sbt150084 }
3058*6621Sbt150084 
3059*6621Sbt150084 /*
3060*6621Sbt150084  * ixgbe_intr_rx - Interrupt handler for rx.
3061*6621Sbt150084  */
3062*6621Sbt150084 static uint_t
3063*6621Sbt150084 ixgbe_intr_rx(void *arg1, void *arg2)
3064*6621Sbt150084 {
3065*6621Sbt150084 	_NOTE(ARGUNUSED(arg2));
3066*6621Sbt150084 	ixgbe_ring_vector_t	*vect = (ixgbe_ring_vector_t *)arg1;
3067*6621Sbt150084 	ixgbe_t			*ixgbe = vect->ixgbe;
3068*6621Sbt150084 	int			r_idx;
3069*6621Sbt150084 
3070*6621Sbt150084 	/*
3071*6621Sbt150084 	 * clean each rx ring that has its bit set in the map
3072*6621Sbt150084 	 */
3073*6621Sbt150084 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
3074*6621Sbt150084 
3075*6621Sbt150084 	while (r_idx >= 0) {
3076*6621Sbt150084 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
3077*6621Sbt150084 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3078*6621Sbt150084 		    (ixgbe->num_rx_rings - 1));
3079*6621Sbt150084 	}
3080*6621Sbt150084 
3081*6621Sbt150084 	return (DDI_INTR_CLAIMED);
3082*6621Sbt150084 }
3083*6621Sbt150084 
3084*6621Sbt150084 /*
3085*6621Sbt150084  * ixgbe_intr_tx_other - Interrupt handler for both tx and other.
3086*6621Sbt150084  *
3087*6621Sbt150084  * Always look for Tx cleanup work.  Only look for other work if the right
3088*6621Sbt150084  * bits are set in the Interrupt Cause Register.
3089*6621Sbt150084  */
3090*6621Sbt150084 static uint_t
3091*6621Sbt150084 ixgbe_intr_tx_other(void *arg1, void *arg2)
3092*6621Sbt150084 {
3093*6621Sbt150084 	_NOTE(ARGUNUSED(arg2));
3094*6621Sbt150084 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3095*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
3096*6621Sbt150084 	uint32_t eicr;
3097*6621Sbt150084 
3098*6621Sbt150084 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3099*6621Sbt150084 
3100*6621Sbt150084 	/*
3101*6621Sbt150084 	 * Always look for Tx cleanup work.  We don't have separate
3102*6621Sbt150084 	 * transmit vectors, so we have only one tx ring enabled.
3103*6621Sbt150084 	 */
3104*6621Sbt150084 	ASSERT(ixgbe->num_tx_rings == 1);
3105*6621Sbt150084 	ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3106*6621Sbt150084 
3107*6621Sbt150084 	/*
3108*6621Sbt150084 	 * Check for "other" causes.
3109*6621Sbt150084 	 */
3110*6621Sbt150084 	if (eicr & IXGBE_EICR_LSC) {
3111*6621Sbt150084 		ixgbe_intr_other_work(ixgbe);
3112*6621Sbt150084 	}
3113*6621Sbt150084 
3114*6621Sbt150084 	return (DDI_INTR_CLAIMED);
3115*6621Sbt150084 }
3116*6621Sbt150084 
3117*6621Sbt150084 /*
3118*6621Sbt150084  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
3119*6621Sbt150084  *
3120*6621Sbt150084  * Normal sequence is to try MSI-X; if not sucessful, try MSI;
3121*6621Sbt150084  * if not successful, try Legacy.
3122*6621Sbt150084  * ixgbe->intr_force can be used to force sequence to start with
3123*6621Sbt150084  * any of the 3 types.
3124*6621Sbt150084  * If MSI-X is not used, number of tx/rx rings is forced to 1.
3125*6621Sbt150084  */
3126*6621Sbt150084 static int
3127*6621Sbt150084 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
3128*6621Sbt150084 {
3129*6621Sbt150084 	dev_info_t *devinfo;
3130*6621Sbt150084 	int intr_types;
3131*6621Sbt150084 	int rc;
3132*6621Sbt150084 
3133*6621Sbt150084 	devinfo = ixgbe->dip;
3134*6621Sbt150084 
3135*6621Sbt150084 	/*
3136*6621Sbt150084 	 * Get supported interrupt types
3137*6621Sbt150084 	 */
3138*6621Sbt150084 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
3139*6621Sbt150084 
3140*6621Sbt150084 	if (rc != DDI_SUCCESS) {
3141*6621Sbt150084 		ixgbe_log(ixgbe,
3142*6621Sbt150084 		    "Get supported interrupt types failed: %d", rc);
3143*6621Sbt150084 		return (IXGBE_FAILURE);
3144*6621Sbt150084 	}
3145*6621Sbt150084 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
3146*6621Sbt150084 
3147*6621Sbt150084 	ixgbe->intr_type = 0;
3148*6621Sbt150084 
3149*6621Sbt150084 	/*
3150*6621Sbt150084 	 * Install MSI-X interrupts
3151*6621Sbt150084 	 */
3152*6621Sbt150084 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
3153*6621Sbt150084 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
3154*6621Sbt150084 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
3155*6621Sbt150084 		if (rc == IXGBE_SUCCESS)
3156*6621Sbt150084 			return (IXGBE_SUCCESS);
3157*6621Sbt150084 
3158*6621Sbt150084 		ixgbe_log(ixgbe,
3159*6621Sbt150084 		    "Allocate MSI-X failed, trying MSI interrupts...");
3160*6621Sbt150084 	}
3161*6621Sbt150084 
3162*6621Sbt150084 	/*
3163*6621Sbt150084 	 * MSI-X not used, force rings to 1
3164*6621Sbt150084 	 */
3165*6621Sbt150084 	ixgbe->num_rx_rings = 1;
3166*6621Sbt150084 	ixgbe->num_tx_rings = 1;
3167*6621Sbt150084 	ixgbe_log(ixgbe,
3168*6621Sbt150084 	    "MSI-X not used, force rx and tx queue number to 1");
3169*6621Sbt150084 
3170*6621Sbt150084 	/*
3171*6621Sbt150084 	 * Install MSI interrupts
3172*6621Sbt150084 	 */
3173*6621Sbt150084 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
3174*6621Sbt150084 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
3175*6621Sbt150084 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
3176*6621Sbt150084 		if (rc == IXGBE_SUCCESS)
3177*6621Sbt150084 			return (IXGBE_SUCCESS);
3178*6621Sbt150084 
3179*6621Sbt150084 		ixgbe_log(ixgbe,
3180*6621Sbt150084 		    "Allocate MSI failed, trying Legacy interrupts...");
3181*6621Sbt150084 	}
3182*6621Sbt150084 
3183*6621Sbt150084 	/*
3184*6621Sbt150084 	 * Install legacy interrupts
3185*6621Sbt150084 	 */
3186*6621Sbt150084 	if (intr_types & DDI_INTR_TYPE_FIXED) {
3187*6621Sbt150084 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
3188*6621Sbt150084 		if (rc == IXGBE_SUCCESS)
3189*6621Sbt150084 			return (IXGBE_SUCCESS);
3190*6621Sbt150084 
3191*6621Sbt150084 		ixgbe_log(ixgbe,
3192*6621Sbt150084 		    "Allocate Legacy interrupts failed");
3193*6621Sbt150084 	}
3194*6621Sbt150084 
3195*6621Sbt150084 	/*
3196*6621Sbt150084 	 * If none of the 3 types succeeded, return failure
3197*6621Sbt150084 	 */
3198*6621Sbt150084 	return (IXGBE_FAILURE);
3199*6621Sbt150084 }
3200*6621Sbt150084 
/*
 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
 *
 * For legacy and MSI, only 1 handle is needed.  For MSI-X,
 * if fewer than 2 handles are available, return failure.
 * Upon success, this sets the number of Rx rings to a number that
 * matches the handles available for Rx interrupts.
 *
 * Returns IXGBE_SUCCESS or IXGBE_FAILURE.  On any failure after
 * allocation has started, ixgbe_rem_intrs() is called to release
 * whatever handles were obtained and free the handle array.
 */
static int
ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
{
	dev_info_t *devinfo;
	int request, count, avail, actual;
	int rx_rings, minimum;
	int rc;

	devinfo = ixgbe->dip;

	/*
	 * Currently only 1 tx ring is supported. More tx rings
	 * will be supported with future enhancement.
	 */
	if (ixgbe->num_tx_rings > 1) {
		ixgbe->num_tx_rings = 1;
		ixgbe_log(ixgbe,
		    "Use only 1 MSI-X vector for tx, "
		    "force tx queue number to 1");
	}

	/*
	 * Decide how many handles to request and the minimum we can
	 * accept, depending on the interrupt type.
	 */
	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
		request = 1;	/* Request 1 legacy interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
		break;

	case DDI_INTR_TYPE_MSI:
		request = 1;	/* Request 1 MSI interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
		break;

	case DDI_INTR_TYPE_MSIX:
		/*
		 * Best number of vectors for the adapter is
		 * # rx rings + # tx rings + 1 for other
		 * But currently we only support number of vectors of
		 * # rx rings + 1 for tx & other
		 */
		request = ixgbe->num_rx_rings + 1;
		minimum = 2;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
		break;

	default:
		ixgbe_log(ixgbe,
		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
		    intr_type);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
	    request, minimum);

	/*
	 * Get number of supported interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count < minimum)) {
		ixgbe_log(ixgbe,
		    "Get interrupt number failed. Return: %d, count: %d",
		    rc, count);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);

	/*
	 * Get number of available interrupts
	 */
	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
		ixgbe_log(ixgbe,
		    "Get interrupt available number failed. "
		    "Return: %d, available: %d", rc, avail);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail);

	/* Scale the request down to what is actually available */
	if (avail < request) {
		ixgbe_log(ixgbe, "Request %d handles, %d available",
		    request, avail);
		request = avail;
	}

	actual = 0;
	ixgbe->intr_cnt = 0;

	/*
	 * Allocate an array of interrupt handles
	 */
	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
	    request, &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Allocate interrupts failed. "
		    "return: %d, request: %d, actual: %d",
		    rc, request, actual);
		goto alloc_handle_fail;
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);

	/*
	 * Record the count before the minimum check below, so that the
	 * failure path (ixgbe_rem_intrs) frees exactly the handles that
	 * were actually allocated.
	 */
	ixgbe->intr_cnt = actual;

	/*
	 * Now we know the actual number of vectors.  Here we assume that
	 * tx and other will share 1 vector and all remaining (must be at
	 * least 1 remaining) will be used for rx.
	 */
	if (actual < minimum) {
		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
		    actual);
		goto alloc_handle_fail;
	}

	/*
	 * For MSI-X, actual might force us to reduce number of rx rings
	 */
	if (intr_type == DDI_INTR_TYPE_MSIX) {
		rx_rings = actual - 1;
		if (rx_rings < ixgbe->num_rx_rings) {
			ixgbe_log(ixgbe,
			    "MSI-X vectors force Rx queue number to %d",
			    rx_rings);
			ixgbe->num_rx_rings = rx_rings;
		}
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto alloc_handle_fail;
	}

	/* Capabilities (e.g. DDI_INTR_FLAG_BLOCK) steer enable/disable */
	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt cap failed: %d", rc);
		goto alloc_handle_fail;
	}

	ixgbe->intr_type = intr_type;

	return (IXGBE_SUCCESS);

alloc_handle_fail:
	/* Frees any allocated handles (intr_cnt) plus the handle array */
	ixgbe_rem_intrs(ixgbe);

	return (IXGBE_FAILURE);
}
3365*6621Sbt150084 
/*
 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
 *
 * Before adding the interrupt handlers, the interrupt vectors have
 * been allocated, and the rx/tx rings have also been allocated.
 *
 * Returns IXGBE_SUCCESS when a handler was registered on every
 * allocated vector, IXGBE_FAILURE otherwise.  On an MSI-X partial
 * failure, any handlers already added are removed before returning.
 */
static int
ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	int vector;
	int rc;
	int i;

	vector = 0;

	switch (ixgbe->intr_type) {
	case DDI_INTR_TYPE_MSIX:
		/*
		 * Add interrupt handler for tx + other
		 * (always on vector 0)
		 */
		rc = ddi_intr_add_handler(ixgbe->htable[vector],
		    (ddi_intr_handler_t *)ixgbe_intr_tx_other,
		    (void *)ixgbe, NULL);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add tx/other interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}
		vector++;

		/*
		 * Add interrupt handler for each rx ring
		 */
		for (i = 0; i < ixgbe->num_rx_rings; i++) {
			rx_ring = &ixgbe->rx_rings[i];

			/*
			 * install pointer to vect_map[vector]
			 * (handler arg is the per-vector map entry,
			 * not the ring itself)
			 */
			rc = ddi_intr_add_handler(ixgbe->htable[vector],
			    (ddi_intr_handler_t *)ixgbe_intr_rx,
			    (void *)&ixgbe->vect_map[vector], NULL);

			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Add rx interrupt handler failed. "
				    "return: %d, rx ring: %d", rc, i);
				/*
				 * Unwind: remove every handler added so
				 * far, in reverse order.
				 */
				for (vector--; vector >= 0; vector--) {
					(void) ddi_intr_remove_handler(
					    ixgbe->htable[vector]);
				}
				return (IXGBE_FAILURE);
			}

			rx_ring->intr_vector = vector;

			vector++;
		}
		break;

	case DDI_INTR_TYPE_MSI:
		/*
		 * Add interrupt handlers for the only vector
		 */
		rc = ddi_intr_add_handler(ixgbe->htable[vector],
		    (ddi_intr_handler_t *)ixgbe_intr_msi,
		    (void *)ixgbe, NULL);

		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add MSI interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}

		rx_ring = &ixgbe->rx_rings[0];
		rx_ring->intr_vector = vector;

		vector++;
		break;

	case DDI_INTR_TYPE_FIXED:
		/*
		 * Add interrupt handlers for the only vector
		 */
		rc = ddi_intr_add_handler(ixgbe->htable[vector],
		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
		    (void *)ixgbe, NULL);

		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add legacy interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}

		rx_ring = &ixgbe->rx_rings[0];
		rx_ring->intr_vector = vector;

		vector++;
		break;

	default:
		return (IXGBE_FAILURE);
	}

	/* Every allocated vector must have received a handler */
	ASSERT(vector == ixgbe->intr_cnt);

	return (IXGBE_SUCCESS);
}
3475*6621Sbt150084 
3476*6621Sbt150084 #pragma inline(ixgbe_map_rxring_to_vector)
3477*6621Sbt150084 /*
3478*6621Sbt150084  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
3479*6621Sbt150084  */
3480*6621Sbt150084 static void
3481*6621Sbt150084 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
3482*6621Sbt150084 {
3483*6621Sbt150084 	ixgbe->vect_map[v_idx].ixgbe = ixgbe;
3484*6621Sbt150084 
3485*6621Sbt150084 	/*
3486*6621Sbt150084 	 * Set bit in map
3487*6621Sbt150084 	 */
3488*6621Sbt150084 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
3489*6621Sbt150084 
3490*6621Sbt150084 	/*
3491*6621Sbt150084 	 * Count bits set
3492*6621Sbt150084 	 */
3493*6621Sbt150084 	ixgbe->vect_map[v_idx].rxr_cnt++;
3494*6621Sbt150084 
3495*6621Sbt150084 	/*
3496*6621Sbt150084 	 * Remember bit position
3497*6621Sbt150084 	 */
3498*6621Sbt150084 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
3499*6621Sbt150084 }
3500*6621Sbt150084 
3501*6621Sbt150084 #pragma inline(ixgbe_map_txring_to_vector)
3502*6621Sbt150084 /*
3503*6621Sbt150084  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
3504*6621Sbt150084  */
3505*6621Sbt150084 static void
3506*6621Sbt150084 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
3507*6621Sbt150084 {
3508*6621Sbt150084 	ixgbe->vect_map[v_idx].ixgbe = ixgbe;
3509*6621Sbt150084 
3510*6621Sbt150084 	/*
3511*6621Sbt150084 	 * Set bit in map
3512*6621Sbt150084 	 */
3513*6621Sbt150084 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
3514*6621Sbt150084 
3515*6621Sbt150084 	/*
3516*6621Sbt150084 	 * Count bits set
3517*6621Sbt150084 	 */
3518*6621Sbt150084 	ixgbe->vect_map[v_idx].txr_cnt++;
3519*6621Sbt150084 
3520*6621Sbt150084 	/*
3521*6621Sbt150084 	 * Remember bit position
3522*6621Sbt150084 	 */
3523*6621Sbt150084 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
3524*6621Sbt150084 }
3525*6621Sbt150084 
3526*6621Sbt150084 /*
3527*6621Sbt150084  * ixgbe_set_ivar - Set the given entry in the given interrupt vector
3528*6621Sbt150084  * allocation register (IVAR).
3529*6621Sbt150084  */
3530*6621Sbt150084 static void
3531*6621Sbt150084 ixgbe_set_ivar(ixgbe_t *ixgbe, uint16_t int_alloc_entry, uint8_t msix_vector)
3532*6621Sbt150084 {
3533*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
3534*6621Sbt150084 	u32 ivar, index;
3535*6621Sbt150084 
3536*6621Sbt150084 	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3537*6621Sbt150084 	index = (int_alloc_entry >> 2) & 0x1F;
3538*6621Sbt150084 	ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3539*6621Sbt150084 	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
3540*6621Sbt150084 	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
3541*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3542*6621Sbt150084 }
3543*6621Sbt150084 
3544*6621Sbt150084 /*
3545*6621Sbt150084  * ixgbe_map_rings_to_vectors - Map descriptor rings to interrupt vectors.
3546*6621Sbt150084  *
3547*6621Sbt150084  * For msi-x, this currently implements only the scheme which is
3548*6621Sbt150084  * 1 vector for tx + other, 1 vector for each rx ring.
3549*6621Sbt150084  */
3550*6621Sbt150084 static int
3551*6621Sbt150084 ixgbe_map_rings_to_vectors(ixgbe_t *ixgbe)
3552*6621Sbt150084 {
3553*6621Sbt150084 	int i, vector = 0;
3554*6621Sbt150084 	int vect_remain = ixgbe->intr_cnt;
3555*6621Sbt150084 
3556*6621Sbt150084 	/* initialize vector map */
3557*6621Sbt150084 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
3558*6621Sbt150084 
3559*6621Sbt150084 	/*
3560*6621Sbt150084 	 * non-MSI-X case is very simple: all interrupts on vector 0
3561*6621Sbt150084 	 */
3562*6621Sbt150084 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
3563*6621Sbt150084 		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
3564*6621Sbt150084 		ixgbe_map_txring_to_vector(ixgbe, 0, 0);
3565*6621Sbt150084 		return (IXGBE_SUCCESS);
3566*6621Sbt150084 	}
3567*6621Sbt150084 
3568*6621Sbt150084 	/*
3569*6621Sbt150084 	 * Ring/vector mapping for MSI-X
3570*6621Sbt150084 	 */
3571*6621Sbt150084 
3572*6621Sbt150084 	/*
3573*6621Sbt150084 	 * Map vector 0 to tx
3574*6621Sbt150084 	 */
3575*6621Sbt150084 	ixgbe_map_txring_to_vector(ixgbe, 0, vector++);
3576*6621Sbt150084 	vect_remain--;
3577*6621Sbt150084 
3578*6621Sbt150084 	/*
3579*6621Sbt150084 	 * Map remaining vectors to rx rings
3580*6621Sbt150084 	 */
3581*6621Sbt150084 	for (i = 0; i < vect_remain; i++) {
3582*6621Sbt150084 		ixgbe_map_rxring_to_vector(ixgbe, i, vector++);
3583*6621Sbt150084 	}
3584*6621Sbt150084 
3585*6621Sbt150084 	return (IXGBE_SUCCESS);
3586*6621Sbt150084 }
3587*6621Sbt150084 
3588*6621Sbt150084 /*
3589*6621Sbt150084  * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
3590*6621Sbt150084  *
3591*6621Sbt150084  * This relies on queue/vector mapping already set up in the
3592*6621Sbt150084  * vect_map[] structures
3593*6621Sbt150084  */
3594*6621Sbt150084 static void
3595*6621Sbt150084 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
3596*6621Sbt150084 {
3597*6621Sbt150084 	struct ixgbe_hw *hw = &ixgbe->hw;
3598*6621Sbt150084 	ixgbe_ring_vector_t	*vect;	/* vector bitmap */
3599*6621Sbt150084 	int			r_idx;	/* ring index */
3600*6621Sbt150084 	int			v_idx;	/* vector index */
3601*6621Sbt150084 
3602*6621Sbt150084 	/*
3603*6621Sbt150084 	 * Clear any previous entries
3604*6621Sbt150084 	 */
3605*6621Sbt150084 	for (v_idx = 0; v_idx < IXGBE_IVAR_REG_NUM; v_idx++)
3606*6621Sbt150084 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
3607*6621Sbt150084 
3608*6621Sbt150084 	/*
3609*6621Sbt150084 	 * "Other" is always on vector 0
3610*6621Sbt150084 	 */
3611*6621Sbt150084 	ixgbe_set_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0);
3612*6621Sbt150084 
3613*6621Sbt150084 	/*
3614*6621Sbt150084 	 * For each interrupt vector, populate the IVAR table
3615*6621Sbt150084 	 */
3616*6621Sbt150084 	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
3617*6621Sbt150084 		vect = &ixgbe->vect_map[v_idx];
3618*6621Sbt150084 
3619*6621Sbt150084 		/*
3620*6621Sbt150084 		 * For each rx ring bit set
3621*6621Sbt150084 		 */
3622*6621Sbt150084 		r_idx = bt_getlowbit(vect->rx_map, 0,
3623*6621Sbt150084 		    (ixgbe->num_rx_rings - 1));
3624*6621Sbt150084 
3625*6621Sbt150084 		while (r_idx >= 0) {
3626*6621Sbt150084 			ixgbe_set_ivar(ixgbe, IXGBE_IVAR_RX_QUEUE(r_idx),
3627*6621Sbt150084 			    v_idx);
3628*6621Sbt150084 			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3629*6621Sbt150084 			    (ixgbe->num_rx_rings - 1));
3630*6621Sbt150084 		}
3631*6621Sbt150084 
3632*6621Sbt150084 		/*
3633*6621Sbt150084 		 * For each tx ring bit set
3634*6621Sbt150084 		 */
3635*6621Sbt150084 		r_idx = bt_getlowbit(vect->tx_map, 0,
3636*6621Sbt150084 		    (ixgbe->num_tx_rings - 1));
3637*6621Sbt150084 
3638*6621Sbt150084 		while (r_idx >= 0) {
3639*6621Sbt150084 			ixgbe_set_ivar(ixgbe, IXGBE_IVAR_TX_QUEUE(r_idx),
3640*6621Sbt150084 			    v_idx);
3641*6621Sbt150084 			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
3642*6621Sbt150084 			    (ixgbe->num_tx_rings - 1));
3643*6621Sbt150084 		}
3644*6621Sbt150084 	}
3645*6621Sbt150084 }
3646*6621Sbt150084 
3647*6621Sbt150084 /*
3648*6621Sbt150084  * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
3649*6621Sbt150084  */
3650*6621Sbt150084 static void
3651*6621Sbt150084 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
3652*6621Sbt150084 {
3653*6621Sbt150084 	int i;
3654*6621Sbt150084 	int rc;
3655*6621Sbt150084 
3656*6621Sbt150084 	for (i = 0; i < ixgbe->intr_cnt; i++) {
3657*6621Sbt150084 		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
3658*6621Sbt150084 		if (rc != DDI_SUCCESS) {
3659*6621Sbt150084 			IXGBE_DEBUGLOG_1(ixgbe,
3660*6621Sbt150084 			    "Remove intr handler failed: %d", rc);
3661*6621Sbt150084 		}
3662*6621Sbt150084 	}
3663*6621Sbt150084 }
3664*6621Sbt150084 
3665*6621Sbt150084 /*
3666*6621Sbt150084  * ixgbe_rem_intrs - Remove the allocated interrupts.
3667*6621Sbt150084  */
3668*6621Sbt150084 static void
3669*6621Sbt150084 ixgbe_rem_intrs(ixgbe_t *ixgbe)
3670*6621Sbt150084 {
3671*6621Sbt150084 	int i;
3672*6621Sbt150084 	int rc;
3673*6621Sbt150084 
3674*6621Sbt150084 	for (i = 0; i < ixgbe->intr_cnt; i++) {
3675*6621Sbt150084 		rc = ddi_intr_free(ixgbe->htable[i]);
3676*6621Sbt150084 		if (rc != DDI_SUCCESS) {
3677*6621Sbt150084 			IXGBE_DEBUGLOG_1(ixgbe,
3678*6621Sbt150084 			    "Free intr failed: %d", rc);
3679*6621Sbt150084 		}
3680*6621Sbt150084 	}
3681*6621Sbt150084 
3682*6621Sbt150084 	kmem_free(ixgbe->htable, ixgbe->intr_size);
3683*6621Sbt150084 	ixgbe->htable = NULL;
3684*6621Sbt150084 }
3685*6621Sbt150084 
3686*6621Sbt150084 /*
3687*6621Sbt150084  * ixgbe_enable_intrs - Enable all the ddi interrupts.
3688*6621Sbt150084  */
3689*6621Sbt150084 static int
3690*6621Sbt150084 ixgbe_enable_intrs(ixgbe_t *ixgbe)
3691*6621Sbt150084 {
3692*6621Sbt150084 	int i;
3693*6621Sbt150084 	int rc;
3694*6621Sbt150084 
3695*6621Sbt150084 	/*
3696*6621Sbt150084 	 * Enable interrupts
3697*6621Sbt150084 	 */
3698*6621Sbt150084 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
3699*6621Sbt150084 		/*
3700*6621Sbt150084 		 * Call ddi_intr_block_enable() for MSI
3701*6621Sbt150084 		 */
3702*6621Sbt150084 		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
3703*6621Sbt150084 		if (rc != DDI_SUCCESS) {
3704*6621Sbt150084 			ixgbe_log(ixgbe,
3705*6621Sbt150084 			    "Enable block intr failed: %d", rc);
3706*6621Sbt150084 			return (IXGBE_FAILURE);
3707*6621Sbt150084 		}
3708*6621Sbt150084 	} else {
3709*6621Sbt150084 		/*
3710*6621Sbt150084 		 * Call ddi_intr_enable() for Legacy/MSI non block enable
3711*6621Sbt150084 		 */
3712*6621Sbt150084 		for (i = 0; i < ixgbe->intr_cnt; i++) {
3713*6621Sbt150084 			rc = ddi_intr_enable(ixgbe->htable[i]);
3714*6621Sbt150084 			if (rc != DDI_SUCCESS) {
3715*6621Sbt150084 				ixgbe_log(ixgbe,
3716*6621Sbt150084 				    "Enable intr failed: %d", rc);
3717*6621Sbt150084 				return (IXGBE_FAILURE);
3718*6621Sbt150084 			}
3719*6621Sbt150084 		}
3720*6621Sbt150084 	}
3721*6621Sbt150084 
3722*6621Sbt150084 	return (IXGBE_SUCCESS);
3723*6621Sbt150084 }
3724*6621Sbt150084 
3725*6621Sbt150084 /*
3726*6621Sbt150084  * ixgbe_disable_intrs - Disable all the interrupts.
3727*6621Sbt150084  */
3728*6621Sbt150084 static int
3729*6621Sbt150084 ixgbe_disable_intrs(ixgbe_t *ixgbe)
3730*6621Sbt150084 {
3731*6621Sbt150084 	int i;
3732*6621Sbt150084 	int rc;
3733*6621Sbt150084 
3734*6621Sbt150084 	/*
3735*6621Sbt150084 	 * Disable all interrupts
3736*6621Sbt150084 	 */
3737*6621Sbt150084 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
3738*6621Sbt150084 		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
3739*6621Sbt150084 		if (rc != DDI_SUCCESS) {
3740*6621Sbt150084 			ixgbe_log(ixgbe,
3741*6621Sbt150084 			    "Disable block intr failed: %d", rc);
3742*6621Sbt150084 			return (IXGBE_FAILURE);
3743*6621Sbt150084 		}
3744*6621Sbt150084 	} else {
3745*6621Sbt150084 		for (i = 0; i < ixgbe->intr_cnt; i++) {
3746*6621Sbt150084 			rc = ddi_intr_disable(ixgbe->htable[i]);
3747*6621Sbt150084 			if (rc != DDI_SUCCESS) {
3748*6621Sbt150084 				ixgbe_log(ixgbe,
3749*6621Sbt150084 				    "Disable intr failed: %d", rc);
3750*6621Sbt150084 				return (IXGBE_FAILURE);
3751*6621Sbt150084 			}
3752*6621Sbt150084 		}
3753*6621Sbt150084 	}
3754*6621Sbt150084 
3755*6621Sbt150084 	return (IXGBE_SUCCESS);
3756*6621Sbt150084 }
3757*6621Sbt150084 
/*
 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
 *
 * Reads the LINKS and PCS 1G autonegotiation registers and caches the
 * local/link-partner duplex capability parameters.  Caller must hold
 * gen_lock (asserted below).
 */
static void
ixgbe_get_hw_state(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t links;
	uint32_t pcs1g_anlp = 0;
	uint32_t pcs1g_ana = 0;

	ASSERT(mutex_owned(&ixgbe->gen_lock));
	/* Default the link-partner capabilities to "not advertised" */
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap  = 0;

	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (links & IXGBE_LINKS_PCS_1G_EN) {
		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		/*
		 * NOTE(review): both the 1000fdx and the 100fdx
		 * link-partner capabilities test the same LPFD mask;
		 * confirm against the datasheet whether 100fdx should
		 * use a different bit.
		 */
		ixgbe->param_lp_1000fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
		ixgbe->param_lp_100fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
	}

	/*
	 * These two are computed outside the if-block above, so when
	 * PCS 1G is not enabled pcs1g_ana remains 0 and both local
	 * capabilities are reported as 0.
	 */
	ixgbe->param_1000fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
	ixgbe->param_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
}
3787*6621Sbt150084 
3788*6621Sbt150084 /*
3789*6621Sbt150084  * ixgbe_get_driver_control - Notify that driver is in control of device.
3790*6621Sbt150084  */
3791*6621Sbt150084 static void
3792*6621Sbt150084 ixgbe_get_driver_control(struct ixgbe_hw *hw)
3793*6621Sbt150084 {
3794*6621Sbt150084 	uint32_t ctrl_ext;
3795*6621Sbt150084 
3796*6621Sbt150084 	/*
3797*6621Sbt150084 	 * Notify firmware that driver is in control of device
3798*6621Sbt150084 	 */
3799*6621Sbt150084 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3800*6621Sbt150084 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
3801*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3802*6621Sbt150084 }
3803*6621Sbt150084 
3804*6621Sbt150084 /*
3805*6621Sbt150084  * ixgbe_release_driver_control - Notify that driver is no longer in control
3806*6621Sbt150084  * of device.
3807*6621Sbt150084  */
3808*6621Sbt150084 static void
3809*6621Sbt150084 ixgbe_release_driver_control(struct ixgbe_hw *hw)
3810*6621Sbt150084 {
3811*6621Sbt150084 	uint32_t ctrl_ext;
3812*6621Sbt150084 
3813*6621Sbt150084 	/*
3814*6621Sbt150084 	 * Notify firmware that driver is no longer in control of device
3815*6621Sbt150084 	 */
3816*6621Sbt150084 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3817*6621Sbt150084 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3818*6621Sbt150084 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3819*6621Sbt150084 }
3820*6621Sbt150084 
3821*6621Sbt150084 /*
3822*6621Sbt150084  * ixgbe_atomic_reserve - Atomic decrease operation.
3823*6621Sbt150084  */
3824*6621Sbt150084 int
3825*6621Sbt150084 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
3826*6621Sbt150084 {
3827*6621Sbt150084 	uint32_t oldval;
3828*6621Sbt150084 	uint32_t newval;
3829*6621Sbt150084 
3830*6621Sbt150084 	/*
3831*6621Sbt150084 	 * ATOMICALLY
3832*6621Sbt150084 	 */
3833*6621Sbt150084 	do {
3834*6621Sbt150084 		oldval = *count_p;
3835*6621Sbt150084 		if (oldval < n)
3836*6621Sbt150084 			return (-1);
3837*6621Sbt150084 		newval = oldval - n;
3838*6621Sbt150084 	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
3839*6621Sbt150084 
3840*6621Sbt150084 	return (newval);
3841*6621Sbt150084 }
3842*6621Sbt150084 
3843*6621Sbt150084 /*
3844*6621Sbt150084  * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
3845*6621Sbt150084  */
3846*6621Sbt150084 static uint8_t *
3847*6621Sbt150084 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
3848*6621Sbt150084 {
3849*6621Sbt150084 	_NOTE(ARGUNUSED(hw));
3850*6621Sbt150084 	_NOTE(ARGUNUSED(vmdq));
3851*6621Sbt150084 	uint8_t *addr = *upd_ptr;
3852*6621Sbt150084 	uint8_t *new_ptr;
3853*6621Sbt150084 
3854*6621Sbt150084 	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
3855*6621Sbt150084 	*upd_ptr = new_ptr;
3856*6621Sbt150084 	return (addr);
3857*6621Sbt150084 }
3858*6621Sbt150084 
3859*6621Sbt150084 /*
3860*6621Sbt150084  * FMA support
3861*6621Sbt150084  */
3862*6621Sbt150084 int
3863*6621Sbt150084 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
3864*6621Sbt150084 {
3865*6621Sbt150084 	ddi_fm_error_t de;
3866*6621Sbt150084 
3867*6621Sbt150084 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
3868*6621Sbt150084 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
3869*6621Sbt150084 	return (de.fme_status);
3870*6621Sbt150084 }
3871*6621Sbt150084 
3872*6621Sbt150084 int
3873*6621Sbt150084 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
3874*6621Sbt150084 {
3875*6621Sbt150084 	ddi_fm_error_t de;
3876*6621Sbt150084 
3877*6621Sbt150084 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
3878*6621Sbt150084 	return (de.fme_status);
3879*6621Sbt150084 }
3880*6621Sbt150084 
3881*6621Sbt150084 /*
3882*6621Sbt150084  * ixgbe_fm_error_cb - The IO fault service error handling callback function.
3883*6621Sbt150084  */
3884*6621Sbt150084 static int
3885*6621Sbt150084 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
3886*6621Sbt150084 {
3887*6621Sbt150084 	_NOTE(ARGUNUSED(impl_data));
3888*6621Sbt150084 	/*
3889*6621Sbt150084 	 * as the driver can always deal with an error in any dma or
3890*6621Sbt150084 	 * access handle, we can just return the fme_status value.
3891*6621Sbt150084 	 */
3892*6621Sbt150084 	pci_ereport_post(dip, err, NULL);
3893*6621Sbt150084 	return (err->fme_status);
3894*6621Sbt150084 }
3895*6621Sbt150084 
3896*6621Sbt150084 static void
3897*6621Sbt150084 ixgbe_fm_init(ixgbe_t *ixgbe)
3898*6621Sbt150084 {
3899*6621Sbt150084 	ddi_iblock_cookie_t iblk;
3900*6621Sbt150084 	int fma_acc_flag, fma_dma_flag;
3901*6621Sbt150084 
3902*6621Sbt150084 	/*
3903*6621Sbt150084 	 * Only register with IO Fault Services if we have some capability
3904*6621Sbt150084 	 */
3905*6621Sbt150084 	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
3906*6621Sbt150084 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
3907*6621Sbt150084 		fma_acc_flag = 1;
3908*6621Sbt150084 	} else {
3909*6621Sbt150084 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
3910*6621Sbt150084 		fma_acc_flag = 0;
3911*6621Sbt150084 	}
3912*6621Sbt150084 
3913*6621Sbt150084 	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
3914*6621Sbt150084 		fma_dma_flag = 1;
3915*6621Sbt150084 	} else {
3916*6621Sbt150084 		fma_dma_flag = 0;
3917*6621Sbt150084 	}
3918*6621Sbt150084 
3919*6621Sbt150084 	ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag);
3920*6621Sbt150084 
3921*6621Sbt150084 	if (ixgbe->fm_capabilities) {
3922*6621Sbt150084 
3923*6621Sbt150084 		/*
3924*6621Sbt150084 		 * Register capabilities with IO Fault Services
3925*6621Sbt150084 		 */
3926*6621Sbt150084 		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
3927*6621Sbt150084 
3928*6621Sbt150084 		/*
3929*6621Sbt150084 		 * Initialize pci ereport capabilities if ereport capable
3930*6621Sbt150084 		 */
3931*6621Sbt150084 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
3932*6621Sbt150084 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
3933*6621Sbt150084 			pci_ereport_setup(ixgbe->dip);
3934*6621Sbt150084 
3935*6621Sbt150084 		/*
3936*6621Sbt150084 		 * Register error callback if error callback capable
3937*6621Sbt150084 		 */
3938*6621Sbt150084 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
3939*6621Sbt150084 			ddi_fm_handler_register(ixgbe->dip,
3940*6621Sbt150084 			    ixgbe_fm_error_cb, (void*) ixgbe);
3941*6621Sbt150084 	}
3942*6621Sbt150084 }
3943*6621Sbt150084 
3944*6621Sbt150084 static void
3945*6621Sbt150084 ixgbe_fm_fini(ixgbe_t *ixgbe)
3946*6621Sbt150084 {
3947*6621Sbt150084 	/*
3948*6621Sbt150084 	 * Only unregister FMA capabilities if they are registered
3949*6621Sbt150084 	 */
3950*6621Sbt150084 	if (ixgbe->fm_capabilities) {
3951*6621Sbt150084 
3952*6621Sbt150084 		/*
3953*6621Sbt150084 		 * Release any resources allocated by pci_ereport_setup()
3954*6621Sbt150084 		 */
3955*6621Sbt150084 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
3956*6621Sbt150084 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
3957*6621Sbt150084 			pci_ereport_teardown(ixgbe->dip);
3958*6621Sbt150084 
3959*6621Sbt150084 		/*
3960*6621Sbt150084 		 * Un-register error callback if error callback capable
3961*6621Sbt150084 		 */
3962*6621Sbt150084 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
3963*6621Sbt150084 			ddi_fm_handler_unregister(ixgbe->dip);
3964*6621Sbt150084 
3965*6621Sbt150084 		/*
3966*6621Sbt150084 		 * Unregister from IO Fault Service
3967*6621Sbt150084 		 */
3968*6621Sbt150084 		ddi_fm_fini(ixgbe->dip);
3969*6621Sbt150084 	}
3970*6621Sbt150084 }
3971*6621Sbt150084 
3972*6621Sbt150084 void
3973*6621Sbt150084 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
3974*6621Sbt150084 {
3975*6621Sbt150084 	uint64_t ena;
3976*6621Sbt150084 	char buf[FM_MAX_CLASS];
3977*6621Sbt150084 
3978*6621Sbt150084 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
3979*6621Sbt150084 	ena = fm_ena_generate(0, FM_ENA_FMT1);
3980*6621Sbt150084 	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
3981*6621Sbt150084 		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
3982*6621Sbt150084 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
3983*6621Sbt150084 	}
3984*6621Sbt150084 }
3985