/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * CPU support routines for DR
 */
34*12004Sjiang.liu@intel.com
35*12004Sjiang.liu@intel.com #include <sys/note.h>
36*12004Sjiang.liu@intel.com #include <sys/debug.h>
37*12004Sjiang.liu@intel.com #include <sys/types.h>
38*12004Sjiang.liu@intel.com #include <sys/errno.h>
39*12004Sjiang.liu@intel.com #include <sys/dditypes.h>
40*12004Sjiang.liu@intel.com #include <sys/ddi.h>
41*12004Sjiang.liu@intel.com #include <sys/sunddi.h>
42*12004Sjiang.liu@intel.com #include <sys/sunndi.h>
43*12004Sjiang.liu@intel.com #include <sys/ndi_impldefs.h>
44*12004Sjiang.liu@intel.com #include <sys/kmem.h>
45*12004Sjiang.liu@intel.com #include <sys/processor.h>
46*12004Sjiang.liu@intel.com #include <sys/cpuvar.h>
47*12004Sjiang.liu@intel.com #include <sys/promif.h>
48*12004Sjiang.liu@intel.com #include <sys/sysmacros.h>
49*12004Sjiang.liu@intel.com #include <sys/archsystm.h>
50*12004Sjiang.liu@intel.com #include <sys/machsystm.h>
51*12004Sjiang.liu@intel.com #include <sys/cpu_module.h>
52*12004Sjiang.liu@intel.com #include <sys/cmn_err.h>
53*12004Sjiang.liu@intel.com
54*12004Sjiang.liu@intel.com #include <sys/dr.h>
55*12004Sjiang.liu@intel.com #include <sys/dr_util.h>
56*12004Sjiang.liu@intel.com
57*12004Sjiang.liu@intel.com /* for the DR*INTERNAL_ERROR macros. see sys/dr.h. */
58*12004Sjiang.liu@intel.com static char *dr_ie_fmt = "dr_cpu.c %d";
59*12004Sjiang.liu@intel.com
/*
 * Sanity-check that a CPU unit belongs to the given board and is really
 * a CPU component.  Under DEBUG the checks are enforced with ASSERTs;
 * in non-DEBUG builds both arguments are unused and the check is a
 * no-op.  Always returns 1 so callers can wrap it in ASSERT().
 */
int
dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
{
#ifdef DEBUG
	ASSERT(cp->sbc_cm.sbdev_bp == bp);
	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
#else
	_NOTE(ARGUNUSED(bp))
	_NOTE(ARGUNUSED(cp))
#endif

	return (1);
}
73*12004Sjiang.liu@intel.com
74*12004Sjiang.liu@intel.com static int
dr_errno2ecode(int error)75*12004Sjiang.liu@intel.com dr_errno2ecode(int error)
76*12004Sjiang.liu@intel.com {
77*12004Sjiang.liu@intel.com int rv;
78*12004Sjiang.liu@intel.com
79*12004Sjiang.liu@intel.com switch (error) {
80*12004Sjiang.liu@intel.com case EBUSY:
81*12004Sjiang.liu@intel.com rv = ESBD_BUSY;
82*12004Sjiang.liu@intel.com break;
83*12004Sjiang.liu@intel.com case EINVAL:
84*12004Sjiang.liu@intel.com rv = ESBD_INVAL;
85*12004Sjiang.liu@intel.com break;
86*12004Sjiang.liu@intel.com case EALREADY:
87*12004Sjiang.liu@intel.com rv = ESBD_ALREADY;
88*12004Sjiang.liu@intel.com break;
89*12004Sjiang.liu@intel.com case ENODEV:
90*12004Sjiang.liu@intel.com rv = ESBD_NODEV;
91*12004Sjiang.liu@intel.com break;
92*12004Sjiang.liu@intel.com case ENOMEM:
93*12004Sjiang.liu@intel.com rv = ESBD_NOMEM;
94*12004Sjiang.liu@intel.com break;
95*12004Sjiang.liu@intel.com default:
96*12004Sjiang.liu@intel.com rv = ESBD_INVAL;
97*12004Sjiang.liu@intel.com }
98*12004Sjiang.liu@intel.com
99*12004Sjiang.liu@intel.com return (rv);
100*12004Sjiang.liu@intel.com }
101*12004Sjiang.liu@intel.com
/*
 * On x86, the "clock-frequency" and cache size device properties may be
 * unavailable before the CPU starts.  If they are unavailable, just set
 * them to zero.
 */
/*
 * Cache the CPU's "clock-frequency" and external-cache size device
 * properties into the unit structure: sbc_speed in MHz (rounded) and
 * sbc_ecache in MB.  Errors from drmach are recorded on the unit's
 * sbdev_error and the fields are left untouched.
 */
static void
dr_cpu_set_prop(dr_cpu_unit_t *cp)
{
	sbd_error_t *err;
	dev_info_t *dip;
	uint64_t clock_freq;
	int ecache_size = 0;
	char *cache_str = NULL;

	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
	if (err) {
		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
		return;
	}

	if (dip == NULL) {
		/* present unit must have a devinfo node; flag as internal */
		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
		return;
	}

	/* read in the CPU speed; defaults to 0 if the property is absent */
	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "clock-frequency", 0);

	/*
	 * The ecache property string is not the same
	 * for all CPU implementations.
	 */
	switch (cp->sbc_cpu_impl) {
	case X86_CPU_IMPL_NEHALEM_EX:
		cache_str = "l3-cache-size";
		break;
	default:
		/* unknown impl: leave cache_str NULL, ecache stays 0 */
		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
		    cp->sbc_cpu_impl);
		break;
	}

	if (cache_str != NULL) {
		/* read in the ecache size */
		/*
		 * If the property is not found in the CPU node,
		 * it has to be kept in the core or cmp node so
		 * we just keep looking.
		 */

		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
		    cache_str, 0);
	}

	/* convert to the proper units: Hz -> MHz (rounded), bytes -> MB */
	cp->sbc_speed = (clock_freq + 500000) / 1000000;
	cp->sbc_ecache = ecache_size / (1024 * 1024);
}
160*12004Sjiang.liu@intel.com
/*
 * Initialize the DR state of one CPU unit: derive its DR state and
 * condition from the attach/present status, cache its cpuid,
 * implementation and current cpu_flags, read the speed/ecache
 * properties, and finally transition the device into the computed
 * state.  Errors from drmach force the DR_STATE_FATAL state.
 */
void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t *err;
	dr_state_t new_state;
	int cpuid;
	int impl;

	/* Map attach/present status onto a DR state and condition. */
	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		/* Unit not present: no valid cpuid or implementation. */
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	/* Snapshot cpu_flags; fake OFFLINE|POWEROFF if there is no cpu_t. */
	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}
219*12004Sjiang.liu@intel.com
/*
 * First phase of CPU attach: announce each attachment point on the
 * console and acquire the locks needed while devinfo tree branches are
 * created.
 *
 * Lock protocol: on return (always 0) the board status lock, the root
 * devinfo node (cookie stored in hp->h_ndi) and cpu_lock are all HELD;
 * they are released later by dr_post_attach_cpu().
 */
int
dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int i;
	static fn_t f = "dr_pre_attach_cpu";

	PR_CPU("%s...\n", f);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS configure %s",
			    up->sbc_cm.sbdev_path);
		}
	}

	/*
	 * Block out status threads while creating
	 * devinfo tree branches
	 */
	dr_lock_status(hp->h_bd);
	ndi_devi_enter(ddi_root_node(), (int *)(&hp->h_ndi));
	mutex_enter(&cpu_lock);

	return (0);
}
255*12004Sjiang.liu@intel.com
/*
 * Attach one CPU unit.  Caller must hold cpu_lock (asserted).
 *
 * Creates the CPU's devinfo branch via drmach_configure(), looks up its
 * cpuid and configures it with the OS.  If either later step fails, the
 * freshly created devinfo branch is destroyed again and the error is
 * recorded on the unit.  On full success, sbc_cpu_id is updated.
 */
/*ARGSUSED*/
void
dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
{
	sbd_error_t *err;
	processorid_t cpuid;
	int rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	err = drmach_configure(cp->sbdev_id, 0);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);

		/* unwind the drmach_configure() above; ignore its error */
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		/* OS refused the CPU; tear the devinfo branch back down */
		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
	} else {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)cp;
		up->sbc_cpu_id = cpuid;
	}
}
289*12004Sjiang.liu@intel.com
/*
 * dr_post_attach_cpu
 *
 * sbd error policy: Does not stop on error. Processes all units in list.
 *
 * Powers on and onlines each newly-attached CPU, then releases the
 * locks acquired by dr_pre_attach_cpu() (cpu_lock, the root devinfo
 * node, and the board status lock).  Returns -1 if any unit failed,
 * otherwise 0.
 */
int
dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int i;
	int errflag = 0;	/* sticky: set on any failure, loop continues */
	static fn_t f = "dr_post_attach_cpu";

	PR_CPU("%s...\n", f);

	/* Startup and online newly-attached CPUs */
	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu *cp;

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL) {
			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (cpu_is_poweredoff(cp)) {
			if (cpu_poweron(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
				errflag = 1;
			}
			/*
			 * NOTE(review): this trace is emitted even when
			 * cpu_poweron() failed just above, and a failed
			 * poweron also falls through to the online attempt
			 * below — confirm this is intentional.
			 */
			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
		}

		if (cpu_is_offline(cp)) {
			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);

			if (cpu_online(cp) != 0) {
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
				errflag = 1;
			}
		}

	}

	/* Release the locks taken in dr_pre_attach_cpu(). */
	mutex_exit(&cpu_lock);
	ndi_devi_exit(ddi_root_node(), hp->h_ndi);
	dr_unlock_status(hp->h_bd);

	if (errflag)
		return (-1);
	else
		return (0);
}
346*12004Sjiang.liu@intel.com
/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 *
 * For each attached CPU unit: verify it is not busy, offline it if it
 * is currently active, then ask drmach to release it.  On failure, any
 * CPUs offlined so far (tracked via lastoffline) are unwound with
 * dr_cancel_cpu().  Returns 0 on success, -1 on the first failure.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int c, cix, i, lastoffline = -1, rv = 0;
	processorid_t cpuid;
	struct cpu *cp;
	dr_cpu_unit_t *up;
	dr_devset_t devset;
	sbd_dev_stat_t *ds;
	static fn_t f = "dr_pre_release_cpu";
	int cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	/* cix = number of valid entries filled into ds[] */
	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * On x86 systems, some CPUs can't be unconfigured.
		 * For example, CPU0 can't be unconfigured because many other
		 * components have a dependency on it.
		 * This check determines if a CPU is currently in use and
		 * returns a "Device busy" error if so.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		/* c < cix means the busy scan above broke out with an error */
		if (c < cix)
			break;

		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}
460*12004Sjiang.liu@intel.com
461*12004Sjiang.liu@intel.com /*
462*12004Sjiang.liu@intel.com * dr_pre_detach_cpu
463*12004Sjiang.liu@intel.com *
464*12004Sjiang.liu@intel.com * sbd error policy: Stops on first error.
465*12004Sjiang.liu@intel.com */
466*12004Sjiang.liu@intel.com int
dr_pre_detach_cpu(dr_handle_t * hp,dr_common_unit_t ** devlist,int devnum)467*12004Sjiang.liu@intel.com dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
468*12004Sjiang.liu@intel.com {
469*12004Sjiang.liu@intel.com _NOTE(ARGUNUSED(hp))
470*12004Sjiang.liu@intel.com
471*12004Sjiang.liu@intel.com int i;
472*12004Sjiang.liu@intel.com int cpu_flags = 0;
473*12004Sjiang.liu@intel.com static fn_t f = "dr_pre_detach_cpu";
474*12004Sjiang.liu@intel.com
475*12004Sjiang.liu@intel.com PR_CPU("%s...\n", f);
476*12004Sjiang.liu@intel.com
477*12004Sjiang.liu@intel.com /*
478*12004Sjiang.liu@intel.com * Block out status threads while destroying devinfo tree
479*12004Sjiang.liu@intel.com * branches
480*12004Sjiang.liu@intel.com */
481*12004Sjiang.liu@intel.com dr_lock_status(hp->h_bd);
482*12004Sjiang.liu@intel.com mutex_enter(&cpu_lock);
483*12004Sjiang.liu@intel.com
484*12004Sjiang.liu@intel.com for (i = 0; i < devnum; i++) {
485*12004Sjiang.liu@intel.com dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
486*12004Sjiang.liu@intel.com struct cpu *cp;
487*12004Sjiang.liu@intel.com
488*12004Sjiang.liu@intel.com if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
489*12004Sjiang.liu@intel.com continue;
490*12004Sjiang.liu@intel.com }
491*12004Sjiang.liu@intel.com
492*12004Sjiang.liu@intel.com ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));
493*12004Sjiang.liu@intel.com
494*12004Sjiang.liu@intel.com cp = cpu_get(up->sbc_cpu_id);
495*12004Sjiang.liu@intel.com if (cp == NULL)
496*12004Sjiang.liu@intel.com continue;
497*12004Sjiang.liu@intel.com
498*12004Sjiang.liu@intel.com /*
499*12004Sjiang.liu@intel.com * Print a console message for each attachment
500*12004Sjiang.liu@intel.com * point. For CMP devices, this means that only
501*12004Sjiang.liu@intel.com * one message should be printed, no matter how
502*12004Sjiang.liu@intel.com * many cores are actually present.
503*12004Sjiang.liu@intel.com */
504*12004Sjiang.liu@intel.com if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
505*12004Sjiang.liu@intel.com cmn_err(CE_CONT, "OS unconfigure %s\n",
506*12004Sjiang.liu@intel.com up->sbc_cm.sbdev_path);
507*12004Sjiang.liu@intel.com }
508*12004Sjiang.liu@intel.com
509*12004Sjiang.liu@intel.com /*
510*12004Sjiang.liu@intel.com * CPUs were offlined during Release.
511*12004Sjiang.liu@intel.com */
512*12004Sjiang.liu@intel.com if (cpu_is_poweredoff(cp)) {
513*12004Sjiang.liu@intel.com PR_CPU("%s: cpu %d already powered OFF\n",
514*12004Sjiang.liu@intel.com f, up->sbc_cpu_id);
515*12004Sjiang.liu@intel.com continue;
516*12004Sjiang.liu@intel.com }
517*12004Sjiang.liu@intel.com
518*12004Sjiang.liu@intel.com if (!cpu_is_offline(cp)) {
519*12004Sjiang.liu@intel.com if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
520*12004Sjiang.liu@intel.com cpu_flags = CPU_FORCED;
521*12004Sjiang.liu@intel.com /* cpu was onlined after release. Offline it again */
522*12004Sjiang.liu@intel.com PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
523*12004Sjiang.liu@intel.com if (cpu_offline(cp, cpu_flags)) {
524*12004Sjiang.liu@intel.com PR_CPU("%s: failed to offline cpu %d\n",
525*12004Sjiang.liu@intel.com f, up->sbc_cpu_id);
526*12004Sjiang.liu@intel.com dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
527*12004Sjiang.liu@intel.com if (disp_bound_threads(cp, 0)) {
528*12004Sjiang.liu@intel.com cmn_err(CE_WARN, "%s: thread(s) bound "
529*12004Sjiang.liu@intel.com "to cpu %d", f, cp->cpu_id);
530*12004Sjiang.liu@intel.com }
531*12004Sjiang.liu@intel.com goto err;
532*12004Sjiang.liu@intel.com }
533*12004Sjiang.liu@intel.com }
534*12004Sjiang.liu@intel.com if (cpu_poweroff(cp) != 0) {
535*12004Sjiang.liu@intel.com dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
536*12004Sjiang.liu@intel.com goto err;
537*12004Sjiang.liu@intel.com } else {
538*12004Sjiang.liu@intel.com PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
539*12004Sjiang.liu@intel.com }
540*12004Sjiang.liu@intel.com }
541*12004Sjiang.liu@intel.com
542*12004Sjiang.liu@intel.com return (0);
543*12004Sjiang.liu@intel.com
544*12004Sjiang.liu@intel.com err:
545*12004Sjiang.liu@intel.com mutex_exit(&cpu_lock);
546*12004Sjiang.liu@intel.com dr_unlock_status(hp->h_bd);
547*12004Sjiang.liu@intel.com return (-1);
548*12004Sjiang.liu@intel.com }
549*12004Sjiang.liu@intel.com
550*12004Sjiang.liu@intel.com /*ARGSUSED*/
551*12004Sjiang.liu@intel.com void
dr_detach_cpu(dr_handle_t * hp,dr_common_unit_t * cp)552*12004Sjiang.liu@intel.com dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
553*12004Sjiang.liu@intel.com {
554*12004Sjiang.liu@intel.com sbd_error_t *err;
555*12004Sjiang.liu@intel.com processorid_t cpuid;
556*12004Sjiang.liu@intel.com int rv;
557*12004Sjiang.liu@intel.com dr_cpu_unit_t *up = (dr_cpu_unit_t *)cp;
558*12004Sjiang.liu@intel.com
559*12004Sjiang.liu@intel.com ASSERT(MUTEX_HELD(&cpu_lock));
560*12004Sjiang.liu@intel.com
561*12004Sjiang.liu@intel.com if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
562*12004Sjiang.liu@intel.com return;
563*12004Sjiang.liu@intel.com }
564*12004Sjiang.liu@intel.com
565*12004Sjiang.liu@intel.com err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
566*12004Sjiang.liu@intel.com if (err) {
567*12004Sjiang.liu@intel.com DRERR_SET_C(&cp->sbdev_error, &err);
568*12004Sjiang.liu@intel.com } else if ((rv = cpu_unconfigure(cpuid)) != 0) {
569*12004Sjiang.liu@intel.com dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
570*12004Sjiang.liu@intel.com } else {
571*12004Sjiang.liu@intel.com err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
572*12004Sjiang.liu@intel.com if (err) {
573*12004Sjiang.liu@intel.com DRERR_SET_C(&cp->sbdev_error, &err);
574*12004Sjiang.liu@intel.com } else {
575*12004Sjiang.liu@intel.com up->sbc_cpu_id = -1;
576*12004Sjiang.liu@intel.com }
577*12004Sjiang.liu@intel.com }
578*12004Sjiang.liu@intel.com }
579*12004Sjiang.liu@intel.com
/*
 * dr_post_detach_cpu
 *
 * Final phase of CPU detach: clears the devinfo-tree cookie in the
 * handle and drops the locks (cpu_lock and the board status lock) still
 * held from the earlier detach phases.  Always returns 0.
 */
/*ARGSUSED1*/
int
dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	static fn_t f = "dr_post_detach_cpu";

	PR_CPU("%s...\n", f);
	hp->h_ndi = 0;

	/* release the locks taken by the pre-detach phase */
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);

	return (0);
}
594*12004Sjiang.liu@intel.com
/*
 * Fill in an sbd_cpu_stat_t for one CPU unit from the unit's cached DR
 * state and the drmach status snapshot.  Reads the speed/ecache device
 * properties on demand if they have not been cached yet.
 */
static void
dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
{
	ASSERT(cp && pstat && csp);

	/* Fill in the common status information */
	bzero((caddr_t)csp, sizeof (*csp));
	csp->cs_type = cp->sbc_cm.sbdev_type;
	csp->cs_unit = cp->sbc_cm.sbdev_unum;
	(void) strlcpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
	csp->cs_cond = cp->sbc_cm.sbdev_cond;
	/* busy if either the DR core or the platform layer reports busy */
	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
	csp->cs_time = cp->sbc_cm.sbdev_time;
	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
	csp->cs_suspend = 0;

	/* CPU specific status data */
	csp->cs_cpuid = cp->sbc_cpu_id;

	/*
	 * If the speed and ecache properties have not been
	 * cached yet, read them in from the device tree.
	 */
	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
		dr_cpu_set_prop(cp);

	/* use the cached speed and ecache values */
	csp->cs_speed = cp->sbc_speed;
	csp->cs_ecache = cp->sbc_ecache;

	mutex_enter(&cpu_lock);
	if (!cpu_get(csp->cs_cpuid)) {
		/* ostate must be UNCONFIGURED */
		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
	}
	mutex_exit(&cpu_lock);
}
632*12004Sjiang.liu@intel.com
633*12004Sjiang.liu@intel.com /*ARGSUSED2*/
634*12004Sjiang.liu@intel.com static void
dr_fill_cmp_stat(sbd_cpu_stat_t * csp,int ncores,int impl,sbd_cmp_stat_t * psp)635*12004Sjiang.liu@intel.com dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
636*12004Sjiang.liu@intel.com {
637*12004Sjiang.liu@intel.com int core;
638*12004Sjiang.liu@intel.com
639*12004Sjiang.liu@intel.com ASSERT(csp && psp && (ncores >= 1));
640*12004Sjiang.liu@intel.com
641*12004Sjiang.liu@intel.com bzero((caddr_t)psp, sizeof (*psp));
642*12004Sjiang.liu@intel.com
643*12004Sjiang.liu@intel.com /*
644*12004Sjiang.liu@intel.com * Fill in the common status information based
645*12004Sjiang.liu@intel.com * on the data for the first core.
646*12004Sjiang.liu@intel.com */
647*12004Sjiang.liu@intel.com psp->ps_type = SBD_COMP_CMP;
648*12004Sjiang.liu@intel.com psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
649*12004Sjiang.liu@intel.com (void) strlcpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
650*12004Sjiang.liu@intel.com psp->ps_cond = csp->cs_cond;
651*12004Sjiang.liu@intel.com psp->ps_busy = csp->cs_busy;
652*12004Sjiang.liu@intel.com psp->ps_time = csp->cs_time;
653*12004Sjiang.liu@intel.com psp->ps_ostate = csp->cs_ostate;
654*12004Sjiang.liu@intel.com psp->ps_suspend = csp->cs_suspend;
655*12004Sjiang.liu@intel.com
656*12004Sjiang.liu@intel.com /* CMP specific status data */
657*12004Sjiang.liu@intel.com *psp->ps_cpuid = csp->cs_cpuid;
658*12004Sjiang.liu@intel.com psp->ps_ncores = 1;
659*12004Sjiang.liu@intel.com psp->ps_speed = csp->cs_speed;
660*12004Sjiang.liu@intel.com psp->ps_ecache = csp->cs_ecache;
661*12004Sjiang.liu@intel.com
662*12004Sjiang.liu@intel.com /*
663*12004Sjiang.liu@intel.com * Walk through the data for the remaining cores.
664*12004Sjiang.liu@intel.com * Make any adjustments to the common status data,
665*12004Sjiang.liu@intel.com * or the shared CMP specific data if necessary.
666*12004Sjiang.liu@intel.com */
667*12004Sjiang.liu@intel.com for (core = 1; core < ncores; core++) {
668*12004Sjiang.liu@intel.com /*
669*12004Sjiang.liu@intel.com * The following properties should be the same
670*12004Sjiang.liu@intel.com * for all the cores of the CMP.
671*12004Sjiang.liu@intel.com */
672*12004Sjiang.liu@intel.com ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
673*12004Sjiang.liu@intel.com SBD_COMP_CMP));
674*12004Sjiang.liu@intel.com
675*12004Sjiang.liu@intel.com if (csp[core].cs_speed > psp->ps_speed)
676*12004Sjiang.liu@intel.com psp->ps_speed = csp[core].cs_speed;
677*12004Sjiang.liu@intel.com if (csp[core].cs_ecache > psp->ps_ecache)
678*12004Sjiang.liu@intel.com psp->ps_ecache = csp[core].cs_ecache;
679*12004Sjiang.liu@intel.com
680*12004Sjiang.liu@intel.com psp->ps_cpuid[core] = csp[core].cs_cpuid;
681*12004Sjiang.liu@intel.com psp->ps_ncores++;
682*12004Sjiang.liu@intel.com
683*12004Sjiang.liu@intel.com /* adjust time if necessary */
684*12004Sjiang.liu@intel.com if (csp[core].cs_time > psp->ps_time) {
685*12004Sjiang.liu@intel.com psp->ps_time = csp[core].cs_time;
686*12004Sjiang.liu@intel.com }
687*12004Sjiang.liu@intel.com
688*12004Sjiang.liu@intel.com psp->ps_busy |= csp[core].cs_busy;
689*12004Sjiang.liu@intel.com
690*12004Sjiang.liu@intel.com /*
691*12004Sjiang.liu@intel.com * If any of the cores are configured, the
692*12004Sjiang.liu@intel.com * entire CMP is marked as configured.
693*12004Sjiang.liu@intel.com */
694*12004Sjiang.liu@intel.com if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
695*12004Sjiang.liu@intel.com psp->ps_ostate = csp[core].cs_ostate;
696*12004Sjiang.liu@intel.com }
697*12004Sjiang.liu@intel.com }
698*12004Sjiang.liu@intel.com }
699*12004Sjiang.liu@intel.com
700*12004Sjiang.liu@intel.com int
dr_cpu_status(dr_handle_t * hp,dr_devset_t devset,sbd_dev_stat_t * dsp)701*12004Sjiang.liu@intel.com dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
702*12004Sjiang.liu@intel.com {
703*12004Sjiang.liu@intel.com int cmp;
704*12004Sjiang.liu@intel.com int core;
705*12004Sjiang.liu@intel.com int ncpu;
706*12004Sjiang.liu@intel.com dr_board_t *bp;
707*12004Sjiang.liu@intel.com sbd_cpu_stat_t *cstat;
708*12004Sjiang.liu@intel.com int impl;
709*12004Sjiang.liu@intel.com
710*12004Sjiang.liu@intel.com bp = hp->h_bd;
711*12004Sjiang.liu@intel.com ncpu = 0;
712*12004Sjiang.liu@intel.com
713*12004Sjiang.liu@intel.com devset &= DR_DEVS_PRESENT(bp);
714*12004Sjiang.liu@intel.com cstat = kmem_zalloc(sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP,
715*12004Sjiang.liu@intel.com KM_SLEEP);
716*12004Sjiang.liu@intel.com
717*12004Sjiang.liu@intel.com /*
718*12004Sjiang.liu@intel.com * Treat every CPU as a CMP. In the case where the
719*12004Sjiang.liu@intel.com * device is not a CMP, treat it as a CMP with only
720*12004Sjiang.liu@intel.com * one core.
721*12004Sjiang.liu@intel.com */
722*12004Sjiang.liu@intel.com for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
723*12004Sjiang.liu@intel.com int ncores;
724*12004Sjiang.liu@intel.com dr_cpu_unit_t *cp;
725*12004Sjiang.liu@intel.com drmach_status_t pstat;
726*12004Sjiang.liu@intel.com sbd_error_t *err;
727*12004Sjiang.liu@intel.com sbd_cmp_stat_t *psp;
728*12004Sjiang.liu@intel.com
729*12004Sjiang.liu@intel.com if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
730*12004Sjiang.liu@intel.com continue;
731*12004Sjiang.liu@intel.com }
732*12004Sjiang.liu@intel.com
733*12004Sjiang.liu@intel.com ncores = 0;
734*12004Sjiang.liu@intel.com
735*12004Sjiang.liu@intel.com for (core = 0; core < MAX_CORES_PER_CMP; core++) {
736*12004Sjiang.liu@intel.com
737*12004Sjiang.liu@intel.com cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));
738*12004Sjiang.liu@intel.com
739*12004Sjiang.liu@intel.com if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
740*12004Sjiang.liu@intel.com /* present, but not fully initialized */
741*12004Sjiang.liu@intel.com continue;
742*12004Sjiang.liu@intel.com }
743*12004Sjiang.liu@intel.com
744*12004Sjiang.liu@intel.com ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));
745*12004Sjiang.liu@intel.com
746*12004Sjiang.liu@intel.com /* skip if not present */
747*12004Sjiang.liu@intel.com if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
748*12004Sjiang.liu@intel.com continue;
749*12004Sjiang.liu@intel.com }
750*12004Sjiang.liu@intel.com
751*12004Sjiang.liu@intel.com /* fetch platform status */
752*12004Sjiang.liu@intel.com err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
753*12004Sjiang.liu@intel.com if (err) {
754*12004Sjiang.liu@intel.com DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
755*12004Sjiang.liu@intel.com continue;
756*12004Sjiang.liu@intel.com }
757*12004Sjiang.liu@intel.com
758*12004Sjiang.liu@intel.com dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
759*12004Sjiang.liu@intel.com /*
760*12004Sjiang.liu@intel.com * We should set impl here because the last core
761*12004Sjiang.liu@intel.com * found might be EMPTY or not present.
762*12004Sjiang.liu@intel.com */
763*12004Sjiang.liu@intel.com impl = cp->sbc_cpu_impl;
764*12004Sjiang.liu@intel.com }
765*12004Sjiang.liu@intel.com
766*12004Sjiang.liu@intel.com if (ncores == 0) {
767*12004Sjiang.liu@intel.com continue;
768*12004Sjiang.liu@intel.com }
769*12004Sjiang.liu@intel.com
770*12004Sjiang.liu@intel.com /*
771*12004Sjiang.liu@intel.com * Store the data to the outgoing array. If the
772*12004Sjiang.liu@intel.com * device is a CMP, combine all the data for the
773*12004Sjiang.liu@intel.com * cores into a single stat structure.
774*12004Sjiang.liu@intel.com *
775*12004Sjiang.liu@intel.com * The check for a CMP device uses the last core
776*12004Sjiang.liu@intel.com * found, assuming that all cores will have the
777*12004Sjiang.liu@intel.com * same implementation.
778*12004Sjiang.liu@intel.com */
779*12004Sjiang.liu@intel.com if (CPU_IMPL_IS_CMP(impl)) {
780*12004Sjiang.liu@intel.com psp = (sbd_cmp_stat_t *)dsp;
781*12004Sjiang.liu@intel.com dr_fill_cmp_stat(cstat, ncores, impl, psp);
782*12004Sjiang.liu@intel.com } else {
783*12004Sjiang.liu@intel.com ASSERT(ncores == 1);
784*12004Sjiang.liu@intel.com bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
785*12004Sjiang.liu@intel.com }
786*12004Sjiang.liu@intel.com
787*12004Sjiang.liu@intel.com dsp++;
788*12004Sjiang.liu@intel.com ncpu++;
789*12004Sjiang.liu@intel.com }
790*12004Sjiang.liu@intel.com
791*12004Sjiang.liu@intel.com kmem_free(cstat, sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP);
792*12004Sjiang.liu@intel.com
793*12004Sjiang.liu@intel.com return (ncpu);
794*12004Sjiang.liu@intel.com }
795*12004Sjiang.liu@intel.com
796*12004Sjiang.liu@intel.com /*
797*12004Sjiang.liu@intel.com * Cancel previous release operation for cpu.
798*12004Sjiang.liu@intel.com * For cpus this means simply bringing cpus that
799*12004Sjiang.liu@intel.com * were offline back online. Note that they had
800*12004Sjiang.liu@intel.com * to have been online at the time there were
801*12004Sjiang.liu@intel.com * released.
802*12004Sjiang.liu@intel.com */
803*12004Sjiang.liu@intel.com int
dr_cancel_cpu(dr_cpu_unit_t * up)804*12004Sjiang.liu@intel.com dr_cancel_cpu(dr_cpu_unit_t *up)
805*12004Sjiang.liu@intel.com {
806*12004Sjiang.liu@intel.com int rv = 0;
807*12004Sjiang.liu@intel.com static fn_t f = "dr_cancel_cpu";
808*12004Sjiang.liu@intel.com
809*12004Sjiang.liu@intel.com ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));
810*12004Sjiang.liu@intel.com
811*12004Sjiang.liu@intel.com if (cpu_flagged_active(up->sbc_cpu_flags)) {
812*12004Sjiang.liu@intel.com struct cpu *cp;
813*12004Sjiang.liu@intel.com
814*12004Sjiang.liu@intel.com /*
815*12004Sjiang.liu@intel.com * CPU had been online, go ahead
816*12004Sjiang.liu@intel.com * bring it back online.
817*12004Sjiang.liu@intel.com */
818*12004Sjiang.liu@intel.com PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);
819*12004Sjiang.liu@intel.com
820*12004Sjiang.liu@intel.com mutex_enter(&cpu_lock);
821*12004Sjiang.liu@intel.com cp = cpu[up->sbc_cpu_id];
822*12004Sjiang.liu@intel.com
823*12004Sjiang.liu@intel.com if (cpu_is_poweredoff(cp)) {
824*12004Sjiang.liu@intel.com if (cpu_poweron(cp)) {
825*12004Sjiang.liu@intel.com cmn_err(CE_WARN, "%s: failed to power-on "
826*12004Sjiang.liu@intel.com "cpu %d", f, up->sbc_cpu_id);
827*12004Sjiang.liu@intel.com rv = -1;
828*12004Sjiang.liu@intel.com }
829*12004Sjiang.liu@intel.com }
830*12004Sjiang.liu@intel.com
831*12004Sjiang.liu@intel.com if (rv == 0 && cpu_is_offline(cp)) {
832*12004Sjiang.liu@intel.com if (cpu_online(cp)) {
833*12004Sjiang.liu@intel.com cmn_err(CE_WARN, "%s: failed to online cpu %d",
834*12004Sjiang.liu@intel.com f, up->sbc_cpu_id);
835*12004Sjiang.liu@intel.com rv = -1;
836*12004Sjiang.liu@intel.com }
837*12004Sjiang.liu@intel.com }
838*12004Sjiang.liu@intel.com
839*12004Sjiang.liu@intel.com if (rv == 0 && cpu_is_online(cp)) {
840*12004Sjiang.liu@intel.com if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
841*12004Sjiang.liu@intel.com if (cpu_intr_disable(cp) != 0) {
842*12004Sjiang.liu@intel.com cmn_err(CE_WARN, "%s: failed to "
843*12004Sjiang.liu@intel.com "disable interrupts on cpu %d", f,
844*12004Sjiang.liu@intel.com up->sbc_cpu_id);
845*12004Sjiang.liu@intel.com }
846*12004Sjiang.liu@intel.com }
847*12004Sjiang.liu@intel.com }
848*12004Sjiang.liu@intel.com
849*12004Sjiang.liu@intel.com mutex_exit(&cpu_lock);
850*12004Sjiang.liu@intel.com }
851*12004Sjiang.liu@intel.com
852*12004Sjiang.liu@intel.com return (rv);
853*12004Sjiang.liu@intel.com }
854*12004Sjiang.liu@intel.com
855*12004Sjiang.liu@intel.com int
dr_disconnect_cpu(dr_cpu_unit_t * up)856*12004Sjiang.liu@intel.com dr_disconnect_cpu(dr_cpu_unit_t *up)
857*12004Sjiang.liu@intel.com {
858*12004Sjiang.liu@intel.com sbd_error_t *err;
859*12004Sjiang.liu@intel.com static fn_t f = "dr_disconnect_cpu";
860*12004Sjiang.liu@intel.com
861*12004Sjiang.liu@intel.com PR_CPU("%s...\n", f);
862*12004Sjiang.liu@intel.com
863*12004Sjiang.liu@intel.com ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
864*12004Sjiang.liu@intel.com (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));
865*12004Sjiang.liu@intel.com
866*12004Sjiang.liu@intel.com ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));
867*12004Sjiang.liu@intel.com
868*12004Sjiang.liu@intel.com if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
869*12004Sjiang.liu@intel.com /*
870*12004Sjiang.liu@intel.com * Cpus were never brought in and so are still
871*12004Sjiang.liu@intel.com * effectively disconnected, so nothing to do here.
872*12004Sjiang.liu@intel.com */
873*12004Sjiang.liu@intel.com PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
874*12004Sjiang.liu@intel.com return (0);
875*12004Sjiang.liu@intel.com }
876*12004Sjiang.liu@intel.com
877*12004Sjiang.liu@intel.com err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
878*12004Sjiang.liu@intel.com if (err == NULL)
879*12004Sjiang.liu@intel.com return (0);
880*12004Sjiang.liu@intel.com else {
881*12004Sjiang.liu@intel.com DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
882*12004Sjiang.liu@intel.com return (-1);
883*12004Sjiang.liu@intel.com }
884*12004Sjiang.liu@intel.com /*NOTREACHED*/
885*12004Sjiang.liu@intel.com }
886