xref: /onnv-gate/usr/src/uts/sun4u/ngdr/io/dr_cpu.c (revision 0:68f95e015346)
1*0Sstevel@tonic-gate /*
2*0Sstevel@tonic-gate  * CDDL HEADER START
3*0Sstevel@tonic-gate  *
4*0Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*0Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*0Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*0Sstevel@tonic-gate  * with the License.
8*0Sstevel@tonic-gate  *
9*0Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*0Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*0Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*0Sstevel@tonic-gate  * and limitations under the License.
13*0Sstevel@tonic-gate  *
14*0Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*0Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*0Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*0Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*0Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*0Sstevel@tonic-gate  *
20*0Sstevel@tonic-gate  * CDDL HEADER END
21*0Sstevel@tonic-gate  */
22*0Sstevel@tonic-gate /*
23*0Sstevel@tonic-gate  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24*0Sstevel@tonic-gate  * Use is subject to license terms.
25*0Sstevel@tonic-gate  */
26*0Sstevel@tonic-gate 
27*0Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*0Sstevel@tonic-gate 
29*0Sstevel@tonic-gate /*
30*0Sstevel@tonic-gate  * CPU support routines for DR
31*0Sstevel@tonic-gate  */
32*0Sstevel@tonic-gate 
33*0Sstevel@tonic-gate #include <sys/note.h>
34*0Sstevel@tonic-gate #include <sys/debug.h>
35*0Sstevel@tonic-gate #include <sys/types.h>
36*0Sstevel@tonic-gate #include <sys/errno.h>
37*0Sstevel@tonic-gate #include <sys/cred.h>
38*0Sstevel@tonic-gate #include <sys/dditypes.h>
39*0Sstevel@tonic-gate #include <sys/devops.h>
40*0Sstevel@tonic-gate #include <sys/modctl.h>
41*0Sstevel@tonic-gate #include <sys/poll.h>
42*0Sstevel@tonic-gate #include <sys/conf.h>
43*0Sstevel@tonic-gate #include <sys/ddi.h>
44*0Sstevel@tonic-gate #include <sys/sunddi.h>
45*0Sstevel@tonic-gate #include <sys/sunndi.h>
46*0Sstevel@tonic-gate #include <sys/ndi_impldefs.h>
47*0Sstevel@tonic-gate #include <sys/stat.h>
48*0Sstevel@tonic-gate #include <sys/kmem.h>
49*0Sstevel@tonic-gate #include <sys/processor.h>
50*0Sstevel@tonic-gate #include <sys/cpuvar.h>
51*0Sstevel@tonic-gate #include <sys/mem_config.h>
52*0Sstevel@tonic-gate #include <sys/promif.h>
53*0Sstevel@tonic-gate #include <sys/x_call.h>
54*0Sstevel@tonic-gate #include <sys/cpu_sgnblk_defs.h>
55*0Sstevel@tonic-gate #include <sys/membar.h>
56*0Sstevel@tonic-gate #include <sys/stack.h>
57*0Sstevel@tonic-gate #include <sys/sysmacros.h>
58*0Sstevel@tonic-gate #include <sys/machsystm.h>
59*0Sstevel@tonic-gate #include <sys/spitregs.h>
60*0Sstevel@tonic-gate 
61*0Sstevel@tonic-gate #include <sys/archsystm.h>
62*0Sstevel@tonic-gate #include <vm/hat_sfmmu.h>
63*0Sstevel@tonic-gate #include <sys/pte.h>
64*0Sstevel@tonic-gate #include <sys/mmu.h>
65*0Sstevel@tonic-gate #include <sys/x_call.h>
66*0Sstevel@tonic-gate #include <sys/cpu_module.h>
67*0Sstevel@tonic-gate #include <sys/cheetahregs.h>
68*0Sstevel@tonic-gate 
69*0Sstevel@tonic-gate #include <sys/autoconf.h>
70*0Sstevel@tonic-gate #include <sys/cmn_err.h>
71*0Sstevel@tonic-gate 
72*0Sstevel@tonic-gate #include <sys/dr.h>
73*0Sstevel@tonic-gate #include <sys/dr_util.h>
74*0Sstevel@tonic-gate 
75*0Sstevel@tonic-gate #ifdef _STARFIRE
76*0Sstevel@tonic-gate #include <sys/starfire.h>
77*0Sstevel@tonic-gate extern struct cpu	*SIGBCPU;
78*0Sstevel@tonic-gate #else
79*0Sstevel@tonic-gate /* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
80*0Sstevel@tonic-gate static char *dr_ie_fmt = "dr_cpu.c %d";
81*0Sstevel@tonic-gate #endif /* _STARFIRE */
82*0Sstevel@tonic-gate 
/*
 * dr_cpu_unit_is_sane
 *
 * DEBUG-only consistency check for a CPU unit: verifies that the
 * unit still belongs to the given board, is of type SBD_COMP_CPU,
 * and that its cached cpuid matches what drmach reports.  Always
 * returns 1 so callers can wrap it in ASSERT().
 *
 * NOTE: the drmach_cpu_get_id() call lives inside an ASSERT(), but
 * the whole stanza is compiled only under DEBUG (where ASSERT is
 * active), so the side effect of filling in `cpuid' is never lost.
 */
int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	processorid_t	cpuid;

	/*
	 * cpuid and unit number should never be different
	 * than they were at discovery/connect time
	 */
	ASSERT(drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid) == 0);

	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
	ASSERT(cp->sbc_cpu_id == cpuid);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}
105*0Sstevel@tonic-gate 
106*0Sstevel@tonic-gate static int
107*0Sstevel@tonic-gate dr_errno2ecode(int error)
108*0Sstevel@tonic-gate {
109*0Sstevel@tonic-gate 	int	rv;
110*0Sstevel@tonic-gate 
111*0Sstevel@tonic-gate 	switch (error) {
112*0Sstevel@tonic-gate 	case EBUSY:
113*0Sstevel@tonic-gate 		rv = ESBD_BUSY;
114*0Sstevel@tonic-gate 		break;
115*0Sstevel@tonic-gate 	case EINVAL:
116*0Sstevel@tonic-gate 		rv = ESBD_INVAL;
117*0Sstevel@tonic-gate 		break;
118*0Sstevel@tonic-gate 	case EALREADY:
119*0Sstevel@tonic-gate 		rv = ESBD_ALREADY;
120*0Sstevel@tonic-gate 		break;
121*0Sstevel@tonic-gate 	case ENODEV:
122*0Sstevel@tonic-gate 		rv = ESBD_NODEV;
123*0Sstevel@tonic-gate 		break;
124*0Sstevel@tonic-gate 	case ENOMEM:
125*0Sstevel@tonic-gate 		rv = ESBD_NOMEM;
126*0Sstevel@tonic-gate 		break;
127*0Sstevel@tonic-gate 	default:
128*0Sstevel@tonic-gate 		rv = ESBD_INVAL;
129*0Sstevel@tonic-gate 	}
130*0Sstevel@tonic-gate 
131*0Sstevel@tonic-gate 	return (rv);
132*0Sstevel@tonic-gate }
133*0Sstevel@tonic-gate 
/*
 * dr_cpu_set_prop
 *
 * Read the "clock-frequency" and ecache-size properties from the
 * CPU's devinfo node and cache them in the unit structure:
 * sbc_speed in MHz (rounded) and sbc_ecache in MB.  On failure to
 * obtain the dip, the error is recorded in cp->sbc_cm.sbdev_error
 * and the cached values are left untouched.
 */
static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dev_info_t	*dip;
	uint64_t	clock_freq;
	int		ecache_size = 0;
	char		*cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
#ifndef _STARFIRE
		/*
		 * Do not report an error on Starfire since
		 * the dip will not be created until after
		 * the CPU has been configured.
		 */
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
#endif /* !_STARFIRE */
		return;
	}

	/* read in the CPU speed */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "clock-frequency", 0);

	/* zero means the property was missing or malformed */
	ASSERT(clock_freq != 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */
	switch (cp->sbc_cpu_impl) {
	case BLACKBIRD_IMPL:
	case CHEETAH_IMPL:
	case CHEETAH_PLUS_IMPL:
		cache_str = "ecache-size";
		break;
	case JAGUAR_IMPL:
		cache_str = "l2-cache-size";
		break;
	case PANTHER_IMPL:
		cache_str = "l3-cache-size";
		break;
	default:
		/* unknown implementation: warn, and panic under DEBUG */
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		ASSERT(0);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, cache_str, 0);
	}

	ASSERT(ecache_size != 0);

	/* convert to the proper units: Hz -> MHz (rounded), bytes -> MB */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}
202*0Sstevel@tonic-gate 
/*
 * dr_init_cpu_unit
 *
 * Initialize a CPU unit's state from its current hardware status:
 * determine the target DR state (CONFIGURED / CONNECTED / EMPTY),
 * cache the cpuid, implementation and cpu_flags, and read the
 * speed/ecache properties.  Any drmach failure records the error
 * in sbdev_error and forces the unit into DR_STATE_FATAL.  The
 * state transition is deferred to the end so the unit is fully
 * initialized before it becomes visible in the new state.
 */
void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		/* no CPU present; use sentinel values */
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	/* snapshot cpu_flags under cpu_lock; absent CPUs read as off */
	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}
261*0Sstevel@tonic-gate 
/*
 * dr_pre_attach_cpu
 *
 * Prepare the listed CPU units for attachment: emit one console
 * message per attachment point (one per CMP, not per core) and
 * unmap any stale sigblock left over from a previous unconfigure.
 *
 * LOCKING: on success this function returns with the board status
 * lock AND cpu_lock held; dr_post_attach_cpu() drops both after
 * the per-unit dr_attach_cpu() calls complete.
 *
 * Always returns 0.
 */
int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		curr_cpu;
	int		next_cpu;
	static fn_t	f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		if (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED) {
			/*
			 * If we're coming from the UNCONFIGURED
			 * state then the cpu's sigblock will
			 * still be mapped in.  Need to unmap it
			 * before continuing with attachment.
			 */
			PR_CPU("%s: unmapping sigblk for cpu %d\n",
				f, up->sbc_cpu_id);

			CPU_SGN_MAPOUT(up->sbc_cpu_id);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	return (0);
}
313*0Sstevel@tonic-gate 
/*
 * dr_attach_cpu
 *
 * Attach a single CPU unit: configure it via drmach, resolve its
 * cpuid, then hand it to the kernel with cpu_configure().  Errors
 * are recorded in cp->sbdev_error; on a failure after the drmach
 * configure step, the device is unwound with drmach_unconfigure()
 * (any secondary error from the unwind is discarded).
 *
 * Called with cpu_lock held (taken in dr_pre_attach_cpu).
 */
/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		/* unwind the drmach configure; ignore secondary errors */
		err = drmach_unconfigure(cp->sbdev_id, DRMACH_DEVI_REMOVE);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id,
				DRMACH_DEVI_REMOVE);
		if (err)
			sbd_err_clear(&err);
	}
}
345*0Sstevel@tonic-gate 
/*
 * dr_post_attach_cpu
 *
 * Power on and online each newly-attached CPU in the list.
 *
 * sbd error policy: Does not stop on error.  Processes all units
 * in list; per-unit failures are recorded via dr_dev_err() and the
 * function returns -1 if any unit failed, 0 otherwise.
 *
 * LOCKING: drops cpu_lock and the board status lock that were
 * acquired in dr_pre_attach_cpu().
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		i;
	int		errflag = 0;
	static fn_t	f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			/* should have been configured by dr_attach_cpu */
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			}
			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}

	}

	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}
402*0Sstevel@tonic-gate 
/*
 * dr_pre_release_cpu
 *
 * Offline and drmach-release each CPU in the list so the board can
 * be released.  A CPU that is busy (e.g. borrowed by POST for
 * iocage testing on STARCAT) or that cannot be offlined causes the
 * operation to fail.
 *
 * sbd error policy: Stops on first error.  On failure, any CPUs
 * already offlined by this call are unwound via dr_cancel_cpu().
 *
 * Returns 0 on success, -1 on failure (error details recorded in
 * the failing unit's sbdev_error).
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
			MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * The STARCAT platform borrows cpus for use by POST in
		 * iocage testing.  These cpus cannot be unconfigured
		 * while they are in use for the iocage.
		 * This check determines if a CPU is currently in use
		 * for iocage testing, and if so, returns a "Device busy"
		 * error.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN,
						&up->sbc_cm, ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		/* inner loop broke out early only on the busy error */
		if (c < cix)
			break;
		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
					f, cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				/* bound threads are the usual offline blocker */
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) "
						"bound to cpu %d",
						f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}
514*0Sstevel@tonic-gate 
/*
 * dr_pre_detach_cpu
 *
 * Offline (if necessary) and power off each CPU in the list in
 * preparation for detach, printing one console message per
 * attachment point (one per CMP, not per core).
 *
 * sbd error policy: Stops on first error.
 *
 * LOCKING: takes the board status lock and cpu_lock.  On success
 * (return 0) BOTH locks remain held; dr_post_detach_cpu() drops
 * them.  On failure (return -1) both locks are dropped here.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		i;
	int		curr_cpu;
	int		next_cpu;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (next_cpu = 0, i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum);
		if (curr_cpu >= next_cpu) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
			next_cpu = curr_cpu + 1;
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release.  Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				/* bound threads are the usual offline blocker */
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) "
						"bound to cpu %d",
						f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	/* success: locks intentionally left held for dr_post_detach_cpu */
	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}
604*0Sstevel@tonic-gate 
/*
 * dr_detach_cpu
 *
 * Detach a single CPU unit: resolve its cpuid, remove it from the
 * kernel with cpu_unconfigure(), then tear down its devinfo branch
 * via drmach_unconfigure().  Errors are recorded in
 * cp->sbdev_error (cpu_unconfigure failures are logged at
 * CE_IGNORE severity).
 *
 * Called with cpu_lock held (taken in dr_pre_detach_cpu).
 */
/*ARGSUSED*/
void
dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t	*err;
	processorid_t	 cpuid;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
	} else {
		err = drmach_unconfigure(cp->sbdev_id, DRMACH_DEVI_REMOVE);
		if (err) {
			DRERR_SET_C(&cp->sbdev_error, &err);
		}
	}
}
627*0Sstevel@tonic-gate 
628*0Sstevel@tonic-gate /*ARGSUSED1*/
629*0Sstevel@tonic-gate int
630*0Sstevel@tonic-gate dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
631*0Sstevel@tonic-gate {
632*0Sstevel@tonic-gate 	static fn_t	f = "dr_post_detach_cpu";
633*0Sstevel@tonic-gate 
634*0Sstevel@tonic-gate 	PR_CPU("%s...\n", f);
635*0Sstevel@tonic-gate 	hp->h_ndi = 0;
636*0Sstevel@tonic-gate 
637*0Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
638*0Sstevel@tonic-gate 	dr_unlock_status(hp->h_bd);
639*0Sstevel@tonic-gate 
640*0Sstevel@tonic-gate 	return (0);
641*0Sstevel@tonic-gate }
642*0Sstevel@tonic-gate 
643*0Sstevel@tonic-gate static void
644*0Sstevel@tonic-gate dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
645*0Sstevel@tonic-gate {
646*0Sstevel@tonic-gate 	ASSERT(cp && pstat && csp);
647*0Sstevel@tonic-gate 
648*0Sstevel@tonic-gate 	/* Fill in the common status information */
649*0Sstevel@tonic-gate 	bzero((caddr_t)csp, sizeof (*csp));
650*0Sstevel@tonic-gate 	csp->cs_type = cp->sbc_cm.sbdev_type;
651*0Sstevel@tonic-gate 	csp->cs_unit = cp->sbc_cm.sbdev_unum;
652*0Sstevel@tonic-gate 	strncpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
653*0Sstevel@tonic-gate 	csp->cs_cond = cp->sbc_cm.sbdev_cond;
654*0Sstevel@tonic-gate 	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
655*0Sstevel@tonic-gate 	csp->cs_time = cp->sbc_cm.sbdev_time;
656*0Sstevel@tonic-gate 	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
657*0Sstevel@tonic-gate 	csp->cs_suspend = 0;
658*0Sstevel@tonic-gate 
659*0Sstevel@tonic-gate 	/* CPU specific status data */
660*0Sstevel@tonic-gate 	csp->cs_cpuid = cp->sbc_cpu_id;
661*0Sstevel@tonic-gate 
662*0Sstevel@tonic-gate #ifdef _STARFIRE
663*0Sstevel@tonic-gate 	csp->cs_isbootproc = (SIGBCPU->cpu_id == cp->sbc_cpu_id) ? 1 : 0;
664*0Sstevel@tonic-gate #endif /* _STARFIRE */
665*0Sstevel@tonic-gate 
666*0Sstevel@tonic-gate 	/*
667*0Sstevel@tonic-gate 	 * If the speed and ecache properties have not been
668*0Sstevel@tonic-gate 	 * cached yet, read them in from the device tree.
669*0Sstevel@tonic-gate 	 */
670*0Sstevel@tonic-gate 	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
671*0Sstevel@tonic-gate 		dr_cpu_set_prop(cp);
672*0Sstevel@tonic-gate 
673*0Sstevel@tonic-gate 	/* use the cached speed and ecache values */
674*0Sstevel@tonic-gate 	csp->cs_speed = cp->sbc_speed;
675*0Sstevel@tonic-gate 	csp->cs_ecache = cp->sbc_ecache;
676*0Sstevel@tonic-gate 
677*0Sstevel@tonic-gate 	mutex_enter(&cpu_lock);
678*0Sstevel@tonic-gate 	if (!cpu_get(csp->cs_cpuid)) {
679*0Sstevel@tonic-gate 		/* ostate must be UNCONFIGURED */
680*0Sstevel@tonic-gate 		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
681*0Sstevel@tonic-gate 	}
682*0Sstevel@tonic-gate 	mutex_exit(&cpu_lock);
683*0Sstevel@tonic-gate }
684*0Sstevel@tonic-gate 
685*0Sstevel@tonic-gate static void
686*0Sstevel@tonic-gate dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
687*0Sstevel@tonic-gate {
688*0Sstevel@tonic-gate 	int	core;
689*0Sstevel@tonic-gate 
690*0Sstevel@tonic-gate 	ASSERT(csp && psp && (ncores >= 1));
691*0Sstevel@tonic-gate 
692*0Sstevel@tonic-gate 	bzero((caddr_t)psp, sizeof (*psp));
693*0Sstevel@tonic-gate 
694*0Sstevel@tonic-gate 	/*
695*0Sstevel@tonic-gate 	 * Fill in the common status information based
696*0Sstevel@tonic-gate 	 * on the data for the first core.
697*0Sstevel@tonic-gate 	 */
698*0Sstevel@tonic-gate 	psp->ps_type = SBD_COMP_CMP;
699*0Sstevel@tonic-gate 	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit);
700*0Sstevel@tonic-gate 	strncpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
701*0Sstevel@tonic-gate 	psp->ps_cond = csp->cs_cond;
702*0Sstevel@tonic-gate 	psp->ps_busy = csp->cs_busy;
703*0Sstevel@tonic-gate 	psp->ps_time = csp->cs_time;
704*0Sstevel@tonic-gate 	psp->ps_ostate = csp->cs_ostate;
705*0Sstevel@tonic-gate 	psp->ps_suspend = csp->cs_suspend;
706*0Sstevel@tonic-gate 
707*0Sstevel@tonic-gate 	/* CMP specific status data */
708*0Sstevel@tonic-gate 	*psp->ps_cpuid = csp->cs_cpuid;
709*0Sstevel@tonic-gate 	psp->ps_ncores = 1;
710*0Sstevel@tonic-gate 	psp->ps_speed = csp->cs_speed;
711*0Sstevel@tonic-gate 	psp->ps_ecache = csp->cs_ecache;
712*0Sstevel@tonic-gate 
713*0Sstevel@tonic-gate 	/*
714*0Sstevel@tonic-gate 	 * Walk through the data for the remaining cores.
715*0Sstevel@tonic-gate 	 * Make any adjustments to the common status data,
716*0Sstevel@tonic-gate 	 * or the shared CMP specific data if necessary.
717*0Sstevel@tonic-gate 	 */
718*0Sstevel@tonic-gate 	for (core = 1; core < ncores; core++) {
719*0Sstevel@tonic-gate 
720*0Sstevel@tonic-gate 		/*
721*0Sstevel@tonic-gate 		 * The following properties should be the same
722*0Sstevel@tonic-gate 		 * for all the cores of the CMP.
723*0Sstevel@tonic-gate 		 */
724*0Sstevel@tonic-gate 		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit));
725*0Sstevel@tonic-gate 		ASSERT(psp->ps_speed == csp[core].cs_speed);
726*0Sstevel@tonic-gate 
727*0Sstevel@tonic-gate 		psp->ps_cpuid[core] = csp[core].cs_cpuid;
728*0Sstevel@tonic-gate 		psp->ps_ncores++;
729*0Sstevel@tonic-gate 
730*0Sstevel@tonic-gate 		/*
731*0Sstevel@tonic-gate 		 * Jaguar has a split ecache, so the ecache
732*0Sstevel@tonic-gate 		 * for each core must be added together to
733*0Sstevel@tonic-gate 		 * get the total ecache for the whole chip.
734*0Sstevel@tonic-gate 		 */
735*0Sstevel@tonic-gate 		if (IS_JAGUAR(impl)) {
736*0Sstevel@tonic-gate 			psp->ps_ecache += csp[core].cs_ecache;
737*0Sstevel@tonic-gate 		}
738*0Sstevel@tonic-gate 
739*0Sstevel@tonic-gate 		/* adjust time if necessary */
740*0Sstevel@tonic-gate 		if (csp[core].cs_time > psp->ps_time) {
741*0Sstevel@tonic-gate 			psp->ps_time = csp[core].cs_time;
742*0Sstevel@tonic-gate 		}
743*0Sstevel@tonic-gate 
744*0Sstevel@tonic-gate 		psp->ps_busy |= csp[core].cs_busy;
745*0Sstevel@tonic-gate 
746*0Sstevel@tonic-gate 		/*
747*0Sstevel@tonic-gate 		 * If any of the cores are configured, the
748*0Sstevel@tonic-gate 		 * entire CMP is marked as configured.
749*0Sstevel@tonic-gate 		 */
750*0Sstevel@tonic-gate 		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
751*0Sstevel@tonic-gate 			psp->ps_ostate = csp[core].cs_ostate;
752*0Sstevel@tonic-gate 		}
753*0Sstevel@tonic-gate 	}
754*0Sstevel@tonic-gate }
755*0Sstevel@tonic-gate 
756*0Sstevel@tonic-gate int
757*0Sstevel@tonic-gate dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
758*0Sstevel@tonic-gate {
759*0Sstevel@tonic-gate 	int		cmp;
760*0Sstevel@tonic-gate 	int		core;
761*0Sstevel@tonic-gate 	int		ncpu;
762*0Sstevel@tonic-gate 	dr_board_t	*bp;
763*0Sstevel@tonic-gate 	sbd_cpu_stat_t	cstat[MAX_CORES_PER_CMP];
764*0Sstevel@tonic-gate 
765*0Sstevel@tonic-gate 	bp = hp->h_bd;
766*0Sstevel@tonic-gate 	ncpu = 0;
767*0Sstevel@tonic-gate 
768*0Sstevel@tonic-gate 	devset &= DR_DEVS_PRESENT(bp);
769*0Sstevel@tonic-gate 
770*0Sstevel@tonic-gate 	/*
771*0Sstevel@tonic-gate 	 * Treat every CPU as a CMP. In the case where the
772*0Sstevel@tonic-gate 	 * device is not a CMP, treat it as a CMP with only
773*0Sstevel@tonic-gate 	 * one core.
774*0Sstevel@tonic-gate 	 */
775*0Sstevel@tonic-gate 	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
776*0Sstevel@tonic-gate 
777*0Sstevel@tonic-gate 		int		ncores;
778*0Sstevel@tonic-gate 		dr_cpu_unit_t	*cp;
779*0Sstevel@tonic-gate 		drmach_status_t	pstat;
780*0Sstevel@tonic-gate 		sbd_error_t	*err;
781*0Sstevel@tonic-gate 		sbd_cmp_stat_t	*psp;
782*0Sstevel@tonic-gate 
783*0Sstevel@tonic-gate 		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
784*0Sstevel@tonic-gate 			continue;
785*0Sstevel@tonic-gate 		}
786*0Sstevel@tonic-gate 
787*0Sstevel@tonic-gate 		ncores = 0;
788*0Sstevel@tonic-gate 
789*0Sstevel@tonic-gate 		for (core = 0; core < MAX_CORES_PER_CMP; core++) {
790*0Sstevel@tonic-gate 
791*0Sstevel@tonic-gate 			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));
792*0Sstevel@tonic-gate 
793*0Sstevel@tonic-gate 			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
794*0Sstevel@tonic-gate 				/* present, but not fully initialized */
795*0Sstevel@tonic-gate 				continue;
796*0Sstevel@tonic-gate 			}
797*0Sstevel@tonic-gate 
798*0Sstevel@tonic-gate 			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));
799*0Sstevel@tonic-gate 
800*0Sstevel@tonic-gate 			/* skip if not present */
801*0Sstevel@tonic-gate 			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
802*0Sstevel@tonic-gate 				continue;
803*0Sstevel@tonic-gate 			}
804*0Sstevel@tonic-gate 
805*0Sstevel@tonic-gate 			/* fetch platform status */
806*0Sstevel@tonic-gate 			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
807*0Sstevel@tonic-gate 			if (err) {
808*0Sstevel@tonic-gate 				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
809*0Sstevel@tonic-gate 				continue;
810*0Sstevel@tonic-gate 			}
811*0Sstevel@tonic-gate 
812*0Sstevel@tonic-gate 			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
813*0Sstevel@tonic-gate 		}
814*0Sstevel@tonic-gate 
815*0Sstevel@tonic-gate 		if (ncores == 0) {
816*0Sstevel@tonic-gate 			continue;
817*0Sstevel@tonic-gate 		}
818*0Sstevel@tonic-gate 
819*0Sstevel@tonic-gate 		/*
820*0Sstevel@tonic-gate 		 * Store the data to the outgoing array. If the
821*0Sstevel@tonic-gate 		 * device is a CMP, combine all the data for the
822*0Sstevel@tonic-gate 		 * cores into a single stat structure.
823*0Sstevel@tonic-gate 		 *
824*0Sstevel@tonic-gate 		 * The check for a CMP device uses the last core
825*0Sstevel@tonic-gate 		 * found, assuming that all cores will have the
826*0Sstevel@tonic-gate 		 * same implementation.
827*0Sstevel@tonic-gate 		 */
828*0Sstevel@tonic-gate 		if (CPU_IMPL_IS_CMP(cp->sbc_cpu_impl)) {
829*0Sstevel@tonic-gate 			psp = (sbd_cmp_stat_t *)dsp;
830*0Sstevel@tonic-gate 			dr_fill_cmp_stat(cstat, ncores, cp->sbc_cpu_impl, psp);
831*0Sstevel@tonic-gate 		} else {
832*0Sstevel@tonic-gate 			ASSERT(ncores == 1);
833*0Sstevel@tonic-gate 			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
834*0Sstevel@tonic-gate 		}
835*0Sstevel@tonic-gate 
836*0Sstevel@tonic-gate 		dsp++;
837*0Sstevel@tonic-gate 		ncpu++;
838*0Sstevel@tonic-gate 	}
839*0Sstevel@tonic-gate 
840*0Sstevel@tonic-gate 	return (ncpu);
841*0Sstevel@tonic-gate }
842*0Sstevel@tonic-gate 
/*
 * Cancel previous release operation for cpu.
 * For cpus this means simply bringing cpus that
 * were offline back online.  Note that they had
 * to have been online at the time they were
 * released.
 */
850*0Sstevel@tonic-gate int
851*0Sstevel@tonic-gate dr_cancel_cpu(dr_cpu_unit_t *up)
852*0Sstevel@tonic-gate {
853*0Sstevel@tonic-gate 	int		rv = 0;
854*0Sstevel@tonic-gate 	static fn_t	f = "dr_cancel_cpu";
855*0Sstevel@tonic-gate 
856*0Sstevel@tonic-gate 	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));
857*0Sstevel@tonic-gate 
858*0Sstevel@tonic-gate 	if (cpu_flagged_active(up->sbc_cpu_flags)) {
859*0Sstevel@tonic-gate 		struct cpu	*cp;
860*0Sstevel@tonic-gate 
861*0Sstevel@tonic-gate 		/*
862*0Sstevel@tonic-gate 		 * CPU had been online, go ahead
863*0Sstevel@tonic-gate 		 * bring it back online.
864*0Sstevel@tonic-gate 		 */
865*0Sstevel@tonic-gate 		PR_CPU("%s: bringing cpu %d back ONLINE\n",
866*0Sstevel@tonic-gate 			f, up->sbc_cpu_id);
867*0Sstevel@tonic-gate 
868*0Sstevel@tonic-gate 		mutex_enter(&cpu_lock);
869*0Sstevel@tonic-gate 		cp = cpu[up->sbc_cpu_id];
870*0Sstevel@tonic-gate 
871*0Sstevel@tonic-gate 		if (cpu_is_poweredoff(cp)) {
872*0Sstevel@tonic-gate 			if (cpu_poweron(cp)) {
873*0Sstevel@tonic-gate 				cmn_err(CE_WARN, "%s: failed to power-on "
874*0Sstevel@tonic-gate 				    "cpu %d", f, up->sbc_cpu_id);
875*0Sstevel@tonic-gate 				rv = -1;
876*0Sstevel@tonic-gate 			}
877*0Sstevel@tonic-gate 		}
878*0Sstevel@tonic-gate 
879*0Sstevel@tonic-gate 		if (cpu_is_offline(cp)) {
880*0Sstevel@tonic-gate 			if (cpu_online(cp)) {
881*0Sstevel@tonic-gate 				cmn_err(CE_WARN, "%s: failed to online cpu %d",
882*0Sstevel@tonic-gate 				    f, up->sbc_cpu_id);
883*0Sstevel@tonic-gate 				rv = -1;
884*0Sstevel@tonic-gate 			}
885*0Sstevel@tonic-gate 		}
886*0Sstevel@tonic-gate 
887*0Sstevel@tonic-gate 		if (cpu_is_online(cp)) {
888*0Sstevel@tonic-gate 			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
889*0Sstevel@tonic-gate 				if (cpu_intr_disable(cp) != 0) {
890*0Sstevel@tonic-gate 					cmn_err(CE_WARN, "%s: failed to "
891*0Sstevel@tonic-gate 					    "disable interrupts on cpu %d",
892*0Sstevel@tonic-gate 					    f, up->sbc_cpu_id);
893*0Sstevel@tonic-gate 				}
894*0Sstevel@tonic-gate 			}
895*0Sstevel@tonic-gate 		}
896*0Sstevel@tonic-gate 
897*0Sstevel@tonic-gate 		mutex_exit(&cpu_lock);
898*0Sstevel@tonic-gate 	}
899*0Sstevel@tonic-gate 
900*0Sstevel@tonic-gate 	return (rv);
901*0Sstevel@tonic-gate }
902*0Sstevel@tonic-gate 
903*0Sstevel@tonic-gate int
904*0Sstevel@tonic-gate dr_disconnect_cpu(dr_cpu_unit_t *up)
905*0Sstevel@tonic-gate {
906*0Sstevel@tonic-gate 	sbd_error_t	*err;
907*0Sstevel@tonic-gate 	static fn_t	f = "dr_disconnect_cpu";
908*0Sstevel@tonic-gate 
909*0Sstevel@tonic-gate 	PR_CPU("%s...\n", f);
910*0Sstevel@tonic-gate 
911*0Sstevel@tonic-gate 	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
912*0Sstevel@tonic-gate 		(up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));
913*0Sstevel@tonic-gate 
914*0Sstevel@tonic-gate 	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));
915*0Sstevel@tonic-gate 
916*0Sstevel@tonic-gate 	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
917*0Sstevel@tonic-gate 		/*
918*0Sstevel@tonic-gate 		 * Cpus were never brought in and so are still
919*0Sstevel@tonic-gate 		 * effectively disconnected, so nothing to do here.
920*0Sstevel@tonic-gate 		 */
921*0Sstevel@tonic-gate 		PR_CPU("%s: cpu %d never brought in\n",
922*0Sstevel@tonic-gate 			f, up->sbc_cpu_id);
923*0Sstevel@tonic-gate 		return (0);
924*0Sstevel@tonic-gate 	}
925*0Sstevel@tonic-gate 
926*0Sstevel@tonic-gate 	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
927*0Sstevel@tonic-gate 	if (err == NULL)
928*0Sstevel@tonic-gate 		return (0);
929*0Sstevel@tonic-gate 	else {
930*0Sstevel@tonic-gate 		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
931*0Sstevel@tonic-gate 		return (-1);
932*0Sstevel@tonic-gate 	}
933*0Sstevel@tonic-gate 	/*NOTREACHED*/
934*0Sstevel@tonic-gate }
935