xref: /onnv-gate/usr/src/uts/i86pc/os/mp_machdep.c (revision 9652:6b40e106879c)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
52006Sandrei  * Common Development and Distribution License (the "License").
62006Sandrei  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
228675SVikram.Hegde@Sun.COM  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
25*9652SMichael.Corcoran@Sun.COM /*
26*9652SMichael.Corcoran@Sun.COM  * Copyright (c) 2009, Intel Corporation.
27*9652SMichael.Corcoran@Sun.COM  * All rights reserved.
28*9652SMichael.Corcoran@Sun.COM  */
290Sstevel@tonic-gate 
305295Srandyf #define	PSMI_1_6
310Sstevel@tonic-gate #include <sys/smp_impldefs.h>
320Sstevel@tonic-gate #include <sys/psm.h>
330Sstevel@tonic-gate #include <sys/psm_modctl.h>
340Sstevel@tonic-gate #include <sys/pit.h>
350Sstevel@tonic-gate #include <sys/cmn_err.h>
360Sstevel@tonic-gate #include <sys/strlog.h>
370Sstevel@tonic-gate #include <sys/clock.h>
380Sstevel@tonic-gate #include <sys/debug.h>
390Sstevel@tonic-gate #include <sys/rtc.h>
400Sstevel@tonic-gate #include <sys/x86_archext.h>
410Sstevel@tonic-gate #include <sys/cpupart.h>
420Sstevel@tonic-gate #include <sys/cpuvar.h>
439637SRandy.Fishel@Sun.COM #include <sys/cpu_event.h>
444606Sesaxe #include <sys/cmt.h>
454481Sbholler #include <sys/cpu.h>
460Sstevel@tonic-gate #include <sys/disp.h>
470Sstevel@tonic-gate #include <sys/archsystm.h>
483446Smrj #include <sys/machsystm.h>
494481Sbholler #include <sys/sysmacros.h>
505084Sjohnlev #include <sys/memlist.h>
513446Smrj #include <sys/param.h>
523446Smrj #include <sys/promif.h>
538906SEric.Saxe@Sun.COM #include <sys/cpu_pm.h>
545084Sjohnlev #if defined(__xpv)
555084Sjohnlev #include <sys/hypervisor.h>
565084Sjohnlev #endif
57916Sschwartz #include <sys/mach_intr.h>
584481Sbholler #include <vm/hat_i86.h>
594652Scwb #include <sys/kdi_machimpl.h>
605864Sesaxe #include <sys/sdt.h>
618906SEric.Saxe@Sun.COM #include <sys/hpet.h>
62*9652SMichael.Corcoran@Sun.COM #include <sys/sunddi.h>
63*9652SMichael.Corcoran@Sun.COM #include <sys/sunndi.h>
640Sstevel@tonic-gate 
650Sstevel@tonic-gate #define	OFFSETOF(s, m)		(size_t)(&(((s *)0)->m))
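/*
 * OFFSETOF() is used by mach_get_platform() below to size truncated
 * psm_ops vectors for older PSMI versions: the byte offset of the first
 * member a given version lacks, divided by the size of a function
 * pointer, gives the number of ops entries to copy.
 */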
660Sstevel@tonic-gate 
670Sstevel@tonic-gate /*
680Sstevel@tonic-gate  *	Local function prototypes
690Sstevel@tonic-gate  */
700Sstevel@tonic-gate static int mp_disable_intr(processorid_t cpun);
710Sstevel@tonic-gate static void mp_enable_intr(processorid_t cpun);
720Sstevel@tonic-gate static void mach_init();
730Sstevel@tonic-gate static void mach_picinit();
740Sstevel@tonic-gate static int machhztomhz(uint64_t cpu_freq_hz);
750Sstevel@tonic-gate static uint64_t mach_getcpufreq(void);
760Sstevel@tonic-gate static void mach_fixcpufreq(void);
770Sstevel@tonic-gate static int mach_clkinit(int, int *);
780Sstevel@tonic-gate static void mach_smpinit(void);
790Sstevel@tonic-gate static int mach_softlvl_to_vect(int ipl);
800Sstevel@tonic-gate static void mach_get_platform(int owner);
810Sstevel@tonic-gate static void mach_construct_info();
820Sstevel@tonic-gate static int mach_translate_irq(dev_info_t *dip, int irqno);
830Sstevel@tonic-gate static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
840Sstevel@tonic-gate     psm_intr_op_t, int *);
850Sstevel@tonic-gate static void mach_notify_error(int level, char *errmsg);
860Sstevel@tonic-gate static hrtime_t dummy_hrtime(void);
870Sstevel@tonic-gate static void dummy_scalehrtime(hrtime_t *);
888906SEric.Saxe@Sun.COM void cpu_idle(void);
890Sstevel@tonic-gate static void cpu_wakeup(cpu_t *, int);
905084Sjohnlev #ifndef __xpv
918906SEric.Saxe@Sun.COM void cpu_idle_mwait(void);
924481Sbholler static void cpu_wakeup_mwait(cpu_t *, int);
935084Sjohnlev #endif
94*9652SMichael.Corcoran@Sun.COM static int mach_cpu_create_devinfo(cpu_t *cp, dev_info_t **dipp);
95*9652SMichael.Corcoran@Sun.COM 
960Sstevel@tonic-gate /*
970Sstevel@tonic-gate  *	External reference functions
980Sstevel@tonic-gate  */
990Sstevel@tonic-gate extern void return_instr();
1000Sstevel@tonic-gate extern uint64_t freq_tsc(uint32_t *);
1010Sstevel@tonic-gate #if defined(__i386)
1020Sstevel@tonic-gate extern uint64_t freq_notsc(uint32_t *);
1030Sstevel@tonic-gate #endif
1040Sstevel@tonic-gate extern void pc_gethrestime(timestruc_t *);
1053434Sesaxe extern int cpuid_get_coreid(cpu_t *);
1063434Sesaxe extern int cpuid_get_chipid(cpu_t *);
1070Sstevel@tonic-gate 
1080Sstevel@tonic-gate /*
1090Sstevel@tonic-gate  *	PSM functions initialization
1100Sstevel@tonic-gate  */
1113446Smrj void (*psm_shutdownf)(int, int)	= (void (*)(int, int))return_instr;
1123446Smrj void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr;
1133446Smrj void (*psm_notifyf)(int)	= (void (*)(int))return_instr;
1143446Smrj void (*psm_set_idle_cpuf)(int)	= (void (*)(int))return_instr;
1153446Smrj void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr;
1160Sstevel@tonic-gate void (*psminitf)()		= mach_init;
1170Sstevel@tonic-gate void (*picinitf)() 		= return_instr;
1180Sstevel@tonic-gate int (*clkinitf)(int, int *) 	= (int (*)(int, int *))return_instr;
1190Sstevel@tonic-gate int (*ap_mlsetup)() 		= (int (*)(void))return_instr;
1200Sstevel@tonic-gate void (*send_dirintf)() 		= return_instr;
1213446Smrj void (*setspl)(int)		= (void (*)(int))return_instr;
1220Sstevel@tonic-gate int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
1230Sstevel@tonic-gate int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
1244652Scwb void (*kdisetsoftint)(int, struct av_softinfo *)=
1254652Scwb 	(void (*)(int, struct av_softinfo *))return_instr;
126999Slq150181 void (*setsoftint)(int, struct av_softinfo *)=
127999Slq150181 	(void (*)(int, struct av_softinfo *))return_instr;
1280Sstevel@tonic-gate int (*slvltovect)(int)		= (int (*)(int))return_instr;
1290Sstevel@tonic-gate int (*setlvl)(int, int *)	= (int (*)(int, int *))return_instr;
1300Sstevel@tonic-gate void (*setlvlx)(int, int)	= (void (*)(int, int))return_instr;
1310Sstevel@tonic-gate int (*psm_disable_intr)(int)	= mp_disable_intr;
1320Sstevel@tonic-gate void (*psm_enable_intr)(int)	= mp_enable_intr;
1330Sstevel@tonic-gate hrtime_t (*gethrtimef)(void)	= dummy_hrtime;
1340Sstevel@tonic-gate hrtime_t (*gethrtimeunscaledf)(void)	= dummy_hrtime;
1350Sstevel@tonic-gate void (*scalehrtimef)(hrtime_t *)	= dummy_scalehrtime;
1360Sstevel@tonic-gate int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
1370Sstevel@tonic-gate void (*gethrestimef)(timestruc_t *) = pc_gethrestime;
1380Sstevel@tonic-gate void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
1390Sstevel@tonic-gate int (*psm_get_clockirq)(int) = NULL;
1400Sstevel@tonic-gate int (*psm_get_ipivect)(int, int) = NULL;
1410Sstevel@tonic-gate 
1420Sstevel@tonic-gate int (*psm_clkinit)(int) = NULL;
1430Sstevel@tonic-gate void (*psm_timer_reprogram)(hrtime_t) = NULL;
1440Sstevel@tonic-gate void (*psm_timer_enable)(void) = NULL;
1450Sstevel@tonic-gate void (*psm_timer_disable)(void) = NULL;
1460Sstevel@tonic-gate void (*psm_post_cyclic_setup)(void *arg) = NULL;
1470Sstevel@tonic-gate int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
1480Sstevel@tonic-gate     int *) = mach_intr_ops;
1495295Srandyf int (*psm_state)(psm_state_request_t *) = (int (*)(psm_state_request_t *))
1505295Srandyf     return_instr;
1510Sstevel@tonic-gate 
1520Sstevel@tonic-gate void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
1530Sstevel@tonic-gate void (*hrtime_tick)(void)	= return_instr;
1540Sstevel@tonic-gate 
155*9652SMichael.Corcoran@Sun.COM int (*psm_cpu_create_devinfo)(cpu_t *, dev_info_t **) = mach_cpu_create_devinfo;
156*9652SMichael.Corcoran@Sun.COM 
1575084Sjohnlev /*
1585084Sjohnlev  * True if the generic TSC code is our source of hrtime, rather than whatever
1595084Sjohnlev  * the PSM can provide.
1605084Sjohnlev  */
1615084Sjohnlev #ifdef __xpv
1625084Sjohnlev int tsc_gethrtime_enable = 0;
1635084Sjohnlev #else
1640Sstevel@tonic-gate int tsc_gethrtime_enable = 1;
1655084Sjohnlev #endif
1660Sstevel@tonic-gate int tsc_gethrtime_initted = 0;
1670Sstevel@tonic-gate 
1680Sstevel@tonic-gate /*
1695084Sjohnlev  * True if the hrtime implementation is "hires"; namely, better than microdata.
1705084Sjohnlev  */
1715084Sjohnlev int gethrtime_hires = 0;
1725084Sjohnlev 
1735084Sjohnlev /*
1740Sstevel@tonic-gate  * Local Static Data
1750Sstevel@tonic-gate  */
1760Sstevel@tonic-gate static struct psm_ops mach_ops;
1770Sstevel@tonic-gate static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL};
1780Sstevel@tonic-gate static ushort_t mach_ver[4] = {0, 0, 0, 0};
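/*
 * mach_set[0] points at mach_ops, the merged ops vector the kernel calls
 * through; the remaining slots are indexed by a PSM module's owner class.
 * mach_construct_info() records each module's ops and version here, and
 * mach_get_platform() then copies an owner's non-NULL entries into slot 0.
 */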
1790Sstevel@tonic-gate 
1800Sstevel@tonic-gate /*
1818675SVikram.Hegde@Sun.COM  * virtualization support for psm
1828675SVikram.Hegde@Sun.COM  */
1838675SVikram.Hegde@Sun.COM void *psm_vt_ops = NULL;
1848675SVikram.Hegde@Sun.COM /*
1853446Smrj  * If non-zero, idle cpus will become "halted" when there's
1860Sstevel@tonic-gate  * no work to do.
1870Sstevel@tonic-gate  */
1883446Smrj int	idle_cpu_use_hlt = 1;
1890Sstevel@tonic-gate 
1905084Sjohnlev #ifndef __xpv
1914481Sbholler /*
1924481Sbholler  * If non-zero, idle cpus will use mwait if available to halt instead of hlt.
1934481Sbholler  */
1944481Sbholler int	idle_cpu_prefer_mwait = 1;
1957716SBill.Holler@Sun.COM /*
1967716SBill.Holler@Sun.COM  * Set to 0 to avoid MONITOR+CLFLUSH assertion.
1977716SBill.Holler@Sun.COM  */
1987716SBill.Holler@Sun.COM int	idle_cpu_assert_cflush_monitor = 1;
1997716SBill.Holler@Sun.COM 
2008906SEric.Saxe@Sun.COM /*
2018906SEric.Saxe@Sun.COM  * If non-zero, idle cpus will not use the power-saving Deep C-State idle loop.
2028906SEric.Saxe@Sun.COM  */
2038906SEric.Saxe@Sun.COM int	idle_cpu_no_deep_c = 0;
2048906SEric.Saxe@Sun.COM /*
2058906SEric.Saxe@Sun.COM  * Non-power saving idle loop and wakeup pointers.
2068906SEric.Saxe@Sun.COM  * Allows user to toggle Deep Idle power saving feature on/off.
2078906SEric.Saxe@Sun.COM  */
2088906SEric.Saxe@Sun.COM void	(*non_deep_idle_cpu)() = cpu_idle;
2098906SEric.Saxe@Sun.COM void	(*non_deep_idle_disp_enq_thread)(cpu_t *, int);
2108906SEric.Saxe@Sun.COM 
2118906SEric.Saxe@Sun.COM /*
2128906SEric.Saxe@Sun.COM  * Object for the kernel to access the HPET.
2138906SEric.Saxe@Sun.COM  */
2148906SEric.Saxe@Sun.COM hpet_t hpet;
2158906SEric.Saxe@Sun.COM 
2168906SEric.Saxe@Sun.COM #endif	/* ifndef __xpv */
2173434Sesaxe 
2183434Sesaxe /*ARGSUSED*/
2193434Sesaxe int
2203434Sesaxe pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
2213434Sesaxe {
2223434Sesaxe 	switch (hw) {
2233434Sesaxe 	case PGHW_IPIPE:
2243434Sesaxe 		if (x86_feature & (X86_HTT)) {
2253434Sesaxe 			/*
2263434Sesaxe 			 * Hyper-threading is SMT
2273434Sesaxe 			 */
2283434Sesaxe 			return (1);
2293434Sesaxe 		} else {
2303434Sesaxe 			return (0);
2313434Sesaxe 		}
2323434Sesaxe 	case PGHW_CHIP:
2333434Sesaxe 		if (x86_feature & (X86_CMP|X86_HTT))
2343434Sesaxe 			return (1);
2353434Sesaxe 		else
2363434Sesaxe 			return (0);
2374606Sesaxe 	case PGHW_CACHE:
2384606Sesaxe 		if (cpuid_get_ncpu_sharing_last_cache(cp) > 1)
2394606Sesaxe 			return (1);
2404606Sesaxe 		else
2414606Sesaxe 			return (0);
2428906SEric.Saxe@Sun.COM 	case PGHW_POW_ACTIVE:
2438906SEric.Saxe@Sun.COM 		if (cpupm_domain_id(cp, CPUPM_DTYPE_ACTIVE) != (id_t)-1)
2448906SEric.Saxe@Sun.COM 			return (1);
2458906SEric.Saxe@Sun.COM 		else
2468906SEric.Saxe@Sun.COM 			return (0);
2478906SEric.Saxe@Sun.COM 	case PGHW_POW_IDLE:
2488906SEric.Saxe@Sun.COM 		if (cpupm_domain_id(cp, CPUPM_DTYPE_IDLE) != (id_t)-1)
2498906SEric.Saxe@Sun.COM 			return (1);
2508906SEric.Saxe@Sun.COM 		else
2518906SEric.Saxe@Sun.COM 			return (0);
2523434Sesaxe 	default:
2533434Sesaxe 		return (0);
2543434Sesaxe 	}
2553434Sesaxe }
2563434Sesaxe 
2573434Sesaxe /*
2583434Sesaxe  * Compare two CPUs and see if they have a pghw_type_t sharing relationship
2593434Sesaxe  * If pghw_type_t is an unsupported hardware type, then return -1
2603434Sesaxe  */
2613434Sesaxe int
2623434Sesaxe pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
2630Sstevel@tonic-gate {
2643434Sesaxe 	id_t pgp_a, pgp_b;
2653434Sesaxe 
2663434Sesaxe 	pgp_a = pg_plat_hw_instance_id(cpu_a, hw);
2673434Sesaxe 	pgp_b = pg_plat_hw_instance_id(cpu_b, hw);
2683434Sesaxe 
2693434Sesaxe 	if (pgp_a == -1 || pgp_b == -1)
2703434Sesaxe 		return (-1);
2713434Sesaxe 
2723434Sesaxe 	return (pgp_a == pgp_b);
2733434Sesaxe }
2743434Sesaxe 
2753434Sesaxe /*
2763434Sesaxe  * Return a physical instance identifier for known hardware sharing
2773434Sesaxe  * relationships
2783434Sesaxe  */
2793434Sesaxe id_t
2803434Sesaxe pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
2813434Sesaxe {
2823434Sesaxe 	switch (hw) {
2833434Sesaxe 	case PGHW_IPIPE:
2843434Sesaxe 		return (cpuid_get_coreid(cpu));
2854606Sesaxe 	case PGHW_CACHE:
2864606Sesaxe 		return (cpuid_get_last_lvl_cacheid(cpu));
2873434Sesaxe 	case PGHW_CHIP:
2883434Sesaxe 		return (cpuid_get_chipid(cpu));
2898906SEric.Saxe@Sun.COM 	case PGHW_POW_ACTIVE:
2908906SEric.Saxe@Sun.COM 		return (cpupm_domain_id(cpu, CPUPM_DTYPE_ACTIVE));
2918906SEric.Saxe@Sun.COM 	case PGHW_POW_IDLE:
2928906SEric.Saxe@Sun.COM 		return (cpupm_domain_id(cpu, CPUPM_DTYPE_IDLE));
2933434Sesaxe 	default:
2943434Sesaxe 		return (-1);
2951228Sandrei 	}
2963434Sesaxe }
2970Sstevel@tonic-gate 
2988906SEric.Saxe@Sun.COM /*
2998906SEric.Saxe@Sun.COM  * Express preference for optimizing for sharing relationship
3008906SEric.Saxe@Sun.COM  * hw1 vs hw2
3018906SEric.Saxe@Sun.COM  */
3028906SEric.Saxe@Sun.COM pghw_type_t
3038906SEric.Saxe@Sun.COM pg_plat_hw_rank(pghw_type_t hw1, pghw_type_t hw2)
3043434Sesaxe {
3058906SEric.Saxe@Sun.COM 	int i, rank1, rank2;
3068906SEric.Saxe@Sun.COM 
3073434Sesaxe 	static pghw_type_t hw_hier[] = {
3083434Sesaxe 		PGHW_IPIPE,
3094606Sesaxe 		PGHW_CACHE,
3103434Sesaxe 		PGHW_CHIP,
3118906SEric.Saxe@Sun.COM 		PGHW_POW_IDLE,
3128906SEric.Saxe@Sun.COM 		PGHW_POW_ACTIVE,
3133434Sesaxe 		PGHW_NUM_COMPONENTS
3143434Sesaxe 	};
3153434Sesaxe 
3163434Sesaxe 	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
3178906SEric.Saxe@Sun.COM 		if (hw_hier[i] == hw1)
3188906SEric.Saxe@Sun.COM 			rank1 = i;
3198906SEric.Saxe@Sun.COM 		if (hw_hier[i] == hw2)
3208906SEric.Saxe@Sun.COM 			rank2 = i;
3213434Sesaxe 	}
3228906SEric.Saxe@Sun.COM 
3238906SEric.Saxe@Sun.COM 	if (rank1 > rank2)
3248906SEric.Saxe@Sun.COM 		return (hw1);
3258906SEric.Saxe@Sun.COM 	else
3268906SEric.Saxe@Sun.COM 		return (hw2);
3273434Sesaxe }
3283434Sesaxe 
3294606Sesaxe /*
3308906SEric.Saxe@Sun.COM  * Override the default CMT dispatcher policy for the specified
3318906SEric.Saxe@Sun.COM  * hardware sharing relationship
3324606Sesaxe  */
3338906SEric.Saxe@Sun.COM pg_cmt_policy_t
3348906SEric.Saxe@Sun.COM pg_plat_cmt_policy(pghw_type_t hw)
3354606Sesaxe {
3368906SEric.Saxe@Sun.COM 	/*
3378906SEric.Saxe@Sun.COM 	 * For shared caches, also load balance across them to
3388906SEric.Saxe@Sun.COM 	 * maximize aggregate cache capacity
3398906SEric.Saxe@Sun.COM 	 */
3408906SEric.Saxe@Sun.COM 	switch (hw) {
3418906SEric.Saxe@Sun.COM 	case PGHW_CACHE:
3428906SEric.Saxe@Sun.COM 		return (CMT_BALANCE|CMT_AFFINITY);
3438906SEric.Saxe@Sun.COM 	default:
3448906SEric.Saxe@Sun.COM 		return (CMT_NO_POLICY);
3458906SEric.Saxe@Sun.COM 	}
3464606Sesaxe }
3474606Sesaxe 
3483434Sesaxe id_t
3493434Sesaxe pg_plat_get_core_id(cpu_t *cpu)
3503434Sesaxe {
3513434Sesaxe 	return ((id_t)cpuid_get_coreid(cpu));
3523434Sesaxe }
3533434Sesaxe 
3543434Sesaxe void
3553434Sesaxe cmp_set_nosteal_interval(void)
3563434Sesaxe {
3573434Sesaxe 	/* Set the nosteal interval (used by disp_getbest()) to 100us */
3583434Sesaxe 	nosteal_nsec = 100000UL;
3590Sstevel@tonic-gate }
3600Sstevel@tonic-gate 
3610Sstevel@tonic-gate /*
3620Sstevel@tonic-gate  * Routine to ensure initial callers to hrtime get 0 as a return value
3630Sstevel@tonic-gate  */
3640Sstevel@tonic-gate static hrtime_t
3650Sstevel@tonic-gate dummy_hrtime(void)
3660Sstevel@tonic-gate {
3670Sstevel@tonic-gate 	return (0);
3680Sstevel@tonic-gate }
3690Sstevel@tonic-gate 
3700Sstevel@tonic-gate /* ARGSUSED */
3710Sstevel@tonic-gate static void
3720Sstevel@tonic-gate dummy_scalehrtime(hrtime_t *ticks)
3730Sstevel@tonic-gate {}
3740Sstevel@tonic-gate 
3750Sstevel@tonic-gate /*
3768906SEric.Saxe@Sun.COM  * Supports Deep C-State power saving idle loop.
3778906SEric.Saxe@Sun.COM  */
3788906SEric.Saxe@Sun.COM void
3798906SEric.Saxe@Sun.COM cpu_idle_adaptive(void)
3808906SEric.Saxe@Sun.COM {
3818906SEric.Saxe@Sun.COM 	(*CPU->cpu_m.mcpu_idle_cpu)();
3828906SEric.Saxe@Sun.COM }
3838906SEric.Saxe@Sun.COM 
3849637SRandy.Fishel@Sun.COM /*
3859637SRandy.Fishel@Sun.COM  * Function called by the CPU idle notification framework to check whether
3869637SRandy.Fishel@Sun.COM  * the CPU has been awakened. It will be called with interrupts disabled.
3879637SRandy.Fishel@Sun.COM  * If CPU has been awakened, call cpu_idle_exit() to notify CPU idle
3889637SRandy.Fishel@Sun.COM  * notification framework.
3899637SRandy.Fishel@Sun.COM  */
3909637SRandy.Fishel@Sun.COM /*ARGSUSED*/
3919637SRandy.Fishel@Sun.COM static void
3929637SRandy.Fishel@Sun.COM cpu_idle_check_wakeup(void *arg)
3938906SEric.Saxe@Sun.COM {
3949637SRandy.Fishel@Sun.COM 	/*
3959637SRandy.Fishel@Sun.COM 	 * Toggle interrupt flag to detect pending interrupts.
3969637SRandy.Fishel@Sun.COM 	 * If interrupt happened, do_interrupt() will notify CPU idle
3979637SRandy.Fishel@Sun.COM 	 * notification framework so no need to call cpu_idle_exit() here.
3989637SRandy.Fishel@Sun.COM 	 */
3999637SRandy.Fishel@Sun.COM 	sti();
4009637SRandy.Fishel@Sun.COM 	SMT_PAUSE();
4019637SRandy.Fishel@Sun.COM 	cli();
4028906SEric.Saxe@Sun.COM }
4038906SEric.Saxe@Sun.COM 
4048906SEric.Saxe@Sun.COM /*
4059637SRandy.Fishel@Sun.COM  * Idle the present CPU until awakened via an interrupt
4060Sstevel@tonic-gate  */
4078906SEric.Saxe@Sun.COM void
4083446Smrj cpu_idle(void)
4090Sstevel@tonic-gate {
4100Sstevel@tonic-gate 	cpu_t		*cpup = CPU;
4118408SEric.Saxe@Sun.COM 	processorid_t	cpu_sid = cpup->cpu_seqid;
412711Sesaxe 	cpupart_t	*cp = cpup->cpu_part;
4130Sstevel@tonic-gate 	int		hset_update = 1;
4140Sstevel@tonic-gate 
4150Sstevel@tonic-gate 	/*
4160Sstevel@tonic-gate 	 * If this CPU is online, and there are multiple CPUs
4170Sstevel@tonic-gate 	 * in the system, then we should note our halting
4180Sstevel@tonic-gate 	 * by adding ourselves to the partition's halted CPU
4190Sstevel@tonic-gate 	 * bitmap. This allows other CPUs to find/awaken us when
4200Sstevel@tonic-gate 	 * work becomes available.
4210Sstevel@tonic-gate 	 */
4220Sstevel@tonic-gate 	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
4230Sstevel@tonic-gate 		hset_update = 0;
4240Sstevel@tonic-gate 
4250Sstevel@tonic-gate 	/*
4268408SEric.Saxe@Sun.COM 	 * Add ourselves to the partition's halted CPUs bitmap
4270Sstevel@tonic-gate 	 * and set our HALTED flag, if necessary.
4280Sstevel@tonic-gate 	 *
429711Sesaxe 	 * When a thread becomes runnable, it is placed on the queue
4308408SEric.Saxe@Sun.COM 	 * and then the halted CPU bitmap is checked to determine who
4319637SRandy.Fishel@Sun.COM 	 * (if anyone) should be awakened. We therefore need to first
4328408SEric.Saxe@Sun.COM 	 * add ourselves to the bitmap, and then check if there
4338408SEric.Saxe@Sun.COM 	 * is any work available. The order is important to prevent a race
4348408SEric.Saxe@Sun.COM 	 * that can lead to work languishing on a run queue somewhere while
4358408SEric.Saxe@Sun.COM 	 * this CPU remains halted.
4368408SEric.Saxe@Sun.COM 	 *
4378408SEric.Saxe@Sun.COM 	 * Either the producing CPU will see we're halted and will awaken us,
4388408SEric.Saxe@Sun.COM 	 * or this CPU will see the work available in disp_anywork().
439711Sesaxe 	 *
4400Sstevel@tonic-gate 	 * Note that memory barriers after updating the HALTED flag
4418408SEric.Saxe@Sun.COM 	 * are not necessary since an atomic operation (updating the bitset)
4420Sstevel@tonic-gate 	 * immediately follows. On x86 the atomic operation acts as a
4430Sstevel@tonic-gate 	 * memory barrier for the update of cpu_disp_flags.
4440Sstevel@tonic-gate 	 */
4450Sstevel@tonic-gate 	if (hset_update) {
4460Sstevel@tonic-gate 		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
4478408SEric.Saxe@Sun.COM 		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
4480Sstevel@tonic-gate 	}
4490Sstevel@tonic-gate 
4500Sstevel@tonic-gate 	/*
4510Sstevel@tonic-gate 	 * Check to make sure there's really nothing to do.
452711Sesaxe 	 * Work destined for this CPU may become available after
453711Sesaxe 	 * this check. We'll be notified through the clearing of our
4548408SEric.Saxe@Sun.COM 	 * bit in the halted CPU bitmap, and a poke.
4550Sstevel@tonic-gate 	 */
4560Sstevel@tonic-gate 	if (disp_anywork()) {
4570Sstevel@tonic-gate 		if (hset_update) {
4580Sstevel@tonic-gate 			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
4598408SEric.Saxe@Sun.COM 			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
4600Sstevel@tonic-gate 		}
461711Sesaxe 		return;
462711Sesaxe 	}
463711Sesaxe 
464711Sesaxe 	/*
465711Sesaxe 	 * We're on our way to being halted.
466711Sesaxe 	 *
467711Sesaxe 	 * Disable interrupts now, so that we'll awaken immediately
468711Sesaxe 	 * after halting if someone tries to poke us between now and
469711Sesaxe 	 * the time we actually halt.
470711Sesaxe 	 *
471711Sesaxe 	 * We check for the presence of our bit after disabling interrupts.
472711Sesaxe 	 * If it's cleared, we'll return. If the bit is cleared after
473711Sesaxe 	 * we check then the poke will pop us out of the halted state.
474711Sesaxe 	 *
475711Sesaxe 	 * This means that the ordering of the poke and the clearing
476711Sesaxe 	 * of the bit by cpu_wakeup is important.
477711Sesaxe 	 * cpu_wakeup() must clear, then poke.
4783446Smrj 	 * cpu_idle() must disable interrupts, then check for the bit.
479711Sesaxe 	 */
480711Sesaxe 	cli();
481711Sesaxe 
4828408SEric.Saxe@Sun.COM 	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
483711Sesaxe 		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
484711Sesaxe 		sti();
485711Sesaxe 		return;
486711Sesaxe 	}
487711Sesaxe 
488711Sesaxe 	/*
489711Sesaxe 	 * The check for anything locally runnable is here for performance
490711Sesaxe 	 * and isn't needed for correctness. disp_nrunnable ought to be
491711Sesaxe 	 * in our cache still, so it's inexpensive to check, and if there
492711Sesaxe 	 * is anything runnable we won't have to wait for the poke.
493711Sesaxe 	 */
494711Sesaxe 	if (cpup->cpu_disp->disp_nrunnable != 0) {
495711Sesaxe 		if (hset_update) {
496711Sesaxe 			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
4978408SEric.Saxe@Sun.COM 			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
498711Sesaxe 		}
4990Sstevel@tonic-gate 		sti();
5000Sstevel@tonic-gate 		return;
5010Sstevel@tonic-gate 	}
5020Sstevel@tonic-gate 
5039637SRandy.Fishel@Sun.COM 	if (cpu_idle_enter(IDLE_STATE_C1, 0,
5049637SRandy.Fishel@Sun.COM 	    cpu_idle_check_wakeup, NULL) == 0) {
5059637SRandy.Fishel@Sun.COM 		mach_cpu_idle();
5069637SRandy.Fishel@Sun.COM 		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
5079637SRandy.Fishel@Sun.COM 	}
5087006Srv207048 
5090Sstevel@tonic-gate 	/*
5100Sstevel@tonic-gate 	 * We're no longer halted
5110Sstevel@tonic-gate 	 */
5120Sstevel@tonic-gate 	if (hset_update) {
5130Sstevel@tonic-gate 		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
5148408SEric.Saxe@Sun.COM 		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
5150Sstevel@tonic-gate 	}
5160Sstevel@tonic-gate }
5170Sstevel@tonic-gate 
5180Sstevel@tonic-gate 
5190Sstevel@tonic-gate /*
5200Sstevel@tonic-gate  * If "cpu" is halted, then wake it up clearing its halted bit in advance.
5210Sstevel@tonic-gate  * Otherwise, see if other CPUs in the cpu partition are halted and need to
5220Sstevel@tonic-gate  * be woken up so that they can steal the thread we placed on this CPU.
5230Sstevel@tonic-gate  * This function is only used on MP systems.
5240Sstevel@tonic-gate  */
5250Sstevel@tonic-gate static void
5260Sstevel@tonic-gate cpu_wakeup(cpu_t *cpu, int bound)
5270Sstevel@tonic-gate {
5280Sstevel@tonic-gate 	uint_t		cpu_found;
5298408SEric.Saxe@Sun.COM 	processorid_t	cpu_sid;
5300Sstevel@tonic-gate 	cpupart_t	*cp;
5310Sstevel@tonic-gate 
5320Sstevel@tonic-gate 	cp = cpu->cpu_part;
5338408SEric.Saxe@Sun.COM 	cpu_sid = cpu->cpu_seqid;
5348408SEric.Saxe@Sun.COM 	if (bitset_in_set(&cp->cp_haltset, cpu_sid)) {
5350Sstevel@tonic-gate 		/*
5360Sstevel@tonic-gate 		 * Clear the halted bit for that CPU since it will be
5370Sstevel@tonic-gate 		 * poked in a moment.
5380Sstevel@tonic-gate 		 */
5398408SEric.Saxe@Sun.COM 		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
5400Sstevel@tonic-gate 		/*
5410Sstevel@tonic-gate 		 * We may find the current CPU present in the halted cpuset
5420Sstevel@tonic-gate 		 * if we're in the context of an interrupt that occurred
5433446Smrj 		 * before we had a chance to clear our bit in cpu_idle().
5440Sstevel@tonic-gate 		 * Poking ourself is obviously unnecessary, since if
5450Sstevel@tonic-gate 		 * we're here, we're not halted.
5460Sstevel@tonic-gate 		 */
5470Sstevel@tonic-gate 		if (cpu != CPU)
5480Sstevel@tonic-gate 			poke_cpu(cpu->cpu_id);
5490Sstevel@tonic-gate 		return;
5500Sstevel@tonic-gate 	} else {
5510Sstevel@tonic-gate 		/*
5520Sstevel@tonic-gate 		 * This cpu isn't halted, but it's idle or undergoing a
5530Sstevel@tonic-gate 		 * context switch. No need to awaken anyone else.
5540Sstevel@tonic-gate 		 */
5550Sstevel@tonic-gate 		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
5560Sstevel@tonic-gate 		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
5570Sstevel@tonic-gate 			return;
5580Sstevel@tonic-gate 	}
5590Sstevel@tonic-gate 
5600Sstevel@tonic-gate 	/*
5618408SEric.Saxe@Sun.COM 	 * No need to wake up other CPUs if this is for a bound thread.
5620Sstevel@tonic-gate 	 */
5630Sstevel@tonic-gate 	if (bound)
5640Sstevel@tonic-gate 		return;
5650Sstevel@tonic-gate 
5660Sstevel@tonic-gate 	/*
5678408SEric.Saxe@Sun.COM 	 * The CPU specified for wakeup isn't currently halted, so check
5688408SEric.Saxe@Sun.COM 	 * to see if there are any other halted CPUs in the partition,
5698408SEric.Saxe@Sun.COM 	 * and if there are then awaken one.
5700Sstevel@tonic-gate 	 */
5710Sstevel@tonic-gate 	do {
5728408SEric.Saxe@Sun.COM 		cpu_found = bitset_find(&cp->cp_haltset);
5738408SEric.Saxe@Sun.COM 		if (cpu_found == (uint_t)-1)
5740Sstevel@tonic-gate 			return;
5758408SEric.Saxe@Sun.COM 	} while (bitset_atomic_test_and_del(&cp->cp_haltset, cpu_found) < 0);
5760Sstevel@tonic-gate 
5778408SEric.Saxe@Sun.COM 	if (cpu_found != CPU->cpu_seqid) {
5788408SEric.Saxe@Sun.COM 		poke_cpu(cpu_seq[cpu_found]->cpu_id);
5798408SEric.Saxe@Sun.COM 	}
5800Sstevel@tonic-gate }
5810Sstevel@tonic-gate 
5825084Sjohnlev #ifndef __xpv
5834481Sbholler /*
5849637SRandy.Fishel@Sun.COM  * Function called by the CPU idle notification framework to check whether
5859637SRandy.Fishel@Sun.COM  * the CPU has been awakened. It will be called with interrupts disabled.
5869637SRandy.Fishel@Sun.COM  * If CPU has been awakened, call cpu_idle_exit() to notify CPU idle
5879637SRandy.Fishel@Sun.COM  * notification framework.
5889637SRandy.Fishel@Sun.COM  */
5899637SRandy.Fishel@Sun.COM static void
5909637SRandy.Fishel@Sun.COM cpu_idle_mwait_check_wakeup(void *arg)
5919637SRandy.Fishel@Sun.COM {
5929637SRandy.Fishel@Sun.COM 	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;
5939637SRandy.Fishel@Sun.COM 
5949637SRandy.Fishel@Sun.COM 	ASSERT(arg != NULL);
5959637SRandy.Fishel@Sun.COM 	if (*mcpu_mwait != MWAIT_HALTED) {
5969637SRandy.Fishel@Sun.COM 		/*
5979637SRandy.Fishel@Sun.COM 		 * CPU has been awakened, notify CPU idle notification system.
5989637SRandy.Fishel@Sun.COM 		 */
5999637SRandy.Fishel@Sun.COM 		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
6009637SRandy.Fishel@Sun.COM 	} else {
6019637SRandy.Fishel@Sun.COM 		/*
6029637SRandy.Fishel@Sun.COM 		 * Toggle interrupt flag to detect pending interrupts.
6039637SRandy.Fishel@Sun.COM 		 * If interrupt happened, do_interrupt() will notify CPU idle
6049637SRandy.Fishel@Sun.COM 		 * notification framework so no need to call cpu_idle_exit()
6059637SRandy.Fishel@Sun.COM 		 * here.
6069637SRandy.Fishel@Sun.COM 		 */
6079637SRandy.Fishel@Sun.COM 		sti();
6089637SRandy.Fishel@Sun.COM 		SMT_PAUSE();
6099637SRandy.Fishel@Sun.COM 		cli();
6109637SRandy.Fishel@Sun.COM 	}
6119637SRandy.Fishel@Sun.COM }
6129637SRandy.Fishel@Sun.COM 
6139637SRandy.Fishel@Sun.COM /*
6149637SRandy.Fishel@Sun.COM  * Idle the present CPU until awakened via touching its monitored line
6154481Sbholler  */
6168906SEric.Saxe@Sun.COM void
6174481Sbholler cpu_idle_mwait(void)
6184481Sbholler {
6194481Sbholler 	volatile uint32_t	*mcpu_mwait = CPU->cpu_m.mcpu_mwait;
6204481Sbholler 	cpu_t			*cpup = CPU;
6218408SEric.Saxe@Sun.COM 	processorid_t		cpu_sid = cpup->cpu_seqid;
6224481Sbholler 	cpupart_t		*cp = cpup->cpu_part;
6234481Sbholler 	int			hset_update = 1;
6244481Sbholler 
6254481Sbholler 	/*
6268906SEric.Saxe@Sun.COM 	 * Set our mcpu_mwait here, so we can tell if anyone tries to
6274481Sbholler 	 * wake us between now and when we call mwait.  No other cpu will
6288408SEric.Saxe@Sun.COM 	 * attempt to set our mcpu_mwait until we add ourself to the halted
6298408SEric.Saxe@Sun.COM 	 * CPU bitmap.
6304481Sbholler 	 */
6314481Sbholler 	*mcpu_mwait = MWAIT_HALTED;
6324481Sbholler 
6334481Sbholler 	/*
6344481Sbholler 	 * If this CPU is online, and there are multiple CPUs
6358906SEric.Saxe@Sun.COM 	 * in the system, then we should note our halting
6364481Sbholler 	 * by adding ourselves to the partition's halted CPU
6374481Sbholler 	 * bitmap. This allows other CPUs to find/awaken us when
6384481Sbholler 	 * work becomes available.
6394481Sbholler 	 */
6404481Sbholler 	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
6414481Sbholler 		hset_update = 0;
6424481Sbholler 
6434481Sbholler 	/*
6448408SEric.Saxe@Sun.COM 	 * Add ourselves to the partition's halted CPUs bitmap
6454481Sbholler 	 * and set our HALTED flag, if necessary.
6464481Sbholler 	 *
6474481Sbholler 	 * When a thread becomes runnable, it is placed on the queue
6488408SEric.Saxe@Sun.COM 	 * and then the halted CPU bitmap is checked to determine who
6498906SEric.Saxe@Sun.COM 	 * (if anyone) should be awakened. We therefore need to first
6508408SEric.Saxe@Sun.COM 	 * add ourselves to the bitmap, and then check if there
6514481Sbholler 	 * is any work available.
6524481Sbholler 	 *
6534481Sbholler 	 * Note that memory barriers after updating the HALTED flag
6544481Sbholler 	 * are not necessary since an atomic operation (updating the bitmap)
6554481Sbholler 	 * immediately follows. On x86 the atomic operation acts as a
6564481Sbholler 	 * memory barrier for the update of cpu_disp_flags.
6574481Sbholler 	 */
6584481Sbholler 	if (hset_update) {
6594481Sbholler 		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
6608408SEric.Saxe@Sun.COM 		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
6614481Sbholler 	}
6624481Sbholler 
6634481Sbholler 	/*
6644481Sbholler 	 * Check to make sure there's really nothing to do.
6654481Sbholler 	 * Work destined for this CPU may become available after
6664481Sbholler 	 * this check. We'll be notified through the clearing of our
6678408SEric.Saxe@Sun.COM 	 * bit in the halted CPU bitmap, and a write to our mcpu_mwait.
6684481Sbholler 	 *
6694481Sbholler 	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
6704481Sbholler 	 */
6714481Sbholler 	if (disp_anywork()) {
6724481Sbholler 		if (hset_update) {
6734481Sbholler 			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
6748408SEric.Saxe@Sun.COM 			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
6754481Sbholler 		}
6764481Sbholler 		return;
6774481Sbholler 	}
6784481Sbholler 
6794481Sbholler 	/*
6804481Sbholler 	 * We're on our way to being halted.
6814481Sbholler 	 * To avoid a lost wakeup, arm the monitor before checking if another
6824481Sbholler 	 * cpu wrote to mcpu_mwait to wake us up.
6834481Sbholler 	 */
6844481Sbholler 	i86_monitor(mcpu_mwait, 0, 0);
6854481Sbholler 	if (*mcpu_mwait == MWAIT_HALTED) {
6869637SRandy.Fishel@Sun.COM 		if (cpu_idle_enter(IDLE_STATE_C1, 0,
6879637SRandy.Fishel@Sun.COM 		    cpu_idle_mwait_check_wakeup, (void *)mcpu_mwait) == 0) {
6889637SRandy.Fishel@Sun.COM 			if (*mcpu_mwait == MWAIT_HALTED) {
6899637SRandy.Fishel@Sun.COM 				i86_mwait(0, 0);
6909637SRandy.Fishel@Sun.COM 			}
6919637SRandy.Fishel@Sun.COM 			cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
6929637SRandy.Fishel@Sun.COM 		}
6934481Sbholler 	}
6944481Sbholler 
6954481Sbholler 	/*
6964481Sbholler 	 * We're no longer halted
6974481Sbholler 	 */
6984481Sbholler 	if (hset_update) {
6994481Sbholler 		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
7008408SEric.Saxe@Sun.COM 		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
7014481Sbholler 	}
7024481Sbholler }
7034481Sbholler 
7044481Sbholler /*
7054481Sbholler  * If "cpu" is halted in mwait, then wake it up clearing its halted bit in
7064481Sbholler  * advance.  Otherwise, see if other CPUs in the cpu partition are halted and
7074481Sbholler  * need to be woken up so that they can steal the thread we placed on this CPU.
7084481Sbholler  * This function is only used on MP systems.
7094481Sbholler  */
7104481Sbholler static void
7114481Sbholler cpu_wakeup_mwait(cpu_t *cp, int bound)
7124481Sbholler {
7134481Sbholler 	cpupart_t	*cpu_part;
7144481Sbholler 	uint_t		cpu_found;
7158408SEric.Saxe@Sun.COM 	processorid_t	cpu_sid;
7164481Sbholler 
7174481Sbholler 	cpu_part = cp->cpu_part;
7188408SEric.Saxe@Sun.COM 	cpu_sid = cp->cpu_seqid;
7194481Sbholler 
7204481Sbholler 	/*
7214481Sbholler 	 * If the target CPU is halted, clear its halted bit since it will
7224481Sbholler 	 * be woken up in a moment.
7234481Sbholler 	 */
7248408SEric.Saxe@Sun.COM 	if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) {
7254481Sbholler 		/*
7264481Sbholler 		 * Clear the halted bit for that CPU since it will be
7274481Sbholler 		 * poked in a moment.
7284481Sbholler 		 */
7298408SEric.Saxe@Sun.COM 		bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid);
7304481Sbholler 		/*
7314481Sbholler 		 * We may find the current CPU present in the halted cpuset
7324481Sbholler 		 * if we're in the context of an interrupt that occurred
7334481Sbholler 		 * before we had a chance to clear our bit in cpu_idle().
7344481Sbholler 		 * Waking ourself is obviously unnecessary, since if
7354481Sbholler 		 * we're here, we're not halted.
7364481Sbholler 		 *
7374481Sbholler 		 * monitor/mwait wakeup via writing to our cache line is
7384481Sbholler 		 * harmless and less expensive than always checking if we
7394481Sbholler 		 * are waking ourself which is an uncommon case.
7404481Sbholler 		 */
7414481Sbholler 		MWAIT_WAKEUP(cp);	/* write to monitored line */
7424481Sbholler 		return;
7434481Sbholler 	} else {
7444481Sbholler 		/*
7454481Sbholler 		 * This cpu isn't halted, but it's idle or undergoing a
7464481Sbholler 		 * context switch. No need to awaken anyone else.
7474481Sbholler 		 */
7484481Sbholler 		if (cp->cpu_thread == cp->cpu_idle_thread ||
7494481Sbholler 		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
7504481Sbholler 			return;
7514481Sbholler 	}
7524481Sbholler 
7534481Sbholler 	/*
7544481Sbholler 	 * No need to wake up other CPUs if the thread we just enqueued
7554481Sbholler 	 * is bound.
7564481Sbholler 	 */
7578408SEric.Saxe@Sun.COM 	if (bound || ncpus == 1)
7584481Sbholler 		return;
7594481Sbholler 
7604481Sbholler 	/*
7614481Sbholler 	 * See if there's any other halted CPUs. If there are, then
7624481Sbholler 	 * select one, and awaken it.
7634481Sbholler 	 * It's possible that after we find a CPU, somebody else
7644481Sbholler 	 * will awaken it before we get the chance.
7654481Sbholler 	 * In that case, look again.
7664481Sbholler 	 */
7674481Sbholler 	do {
7688408SEric.Saxe@Sun.COM 		cpu_found = bitset_find(&cpu_part->cp_haltset);
7698408SEric.Saxe@Sun.COM 		if (cpu_found == (uint_t)-1)
7704481Sbholler 			return;
7718408SEric.Saxe@Sun.COM 	} while (bitset_atomic_test_and_del(&cpu_part->cp_haltset,
7728408SEric.Saxe@Sun.COM 	    cpu_found) < 0);
7734481Sbholler 
7744481Sbholler 	/*
7758408SEric.Saxe@Sun.COM 	 * Do not check if cpu_found is ourself as monitor/mwait
7768408SEric.Saxe@Sun.COM 	 * wakeup is cheap.
7774481Sbholler 	 */
7788408SEric.Saxe@Sun.COM 	MWAIT_WAKEUP(cpu_seq[cpu_found]); /* write to monitored line */
7794481Sbholler }
7808408SEric.Saxe@Sun.COM 
7815084Sjohnlev #endif
7824481Sbholler 
7833446Smrj void (*cpu_pause_handler)(volatile char *) = NULL;
7843446Smrj 
7850Sstevel@tonic-gate static int
7860Sstevel@tonic-gate mp_disable_intr(int cpun)
7870Sstevel@tonic-gate {
7880Sstevel@tonic-gate 	/*
7890Sstevel@tonic-gate 	 * switch to the offline cpu
7900Sstevel@tonic-gate 	 */
7910Sstevel@tonic-gate 	affinity_set(cpun);
7920Sstevel@tonic-gate 	/*
7930Sstevel@tonic-gate 	 * raise ipl to just below cross call
7940Sstevel@tonic-gate 	 */
7959489SJoe.Bonasera@sun.com 	splx(XC_SYS_PIL - 1);
7960Sstevel@tonic-gate 	/*
7970Sstevel@tonic-gate 	 *	set base spl to prevent the next swtch to idle from
7980Sstevel@tonic-gate 	 *	lowering back to ipl 0
7990Sstevel@tonic-gate 	 */
8009489SJoe.Bonasera@sun.com 	CPU->cpu_intr_actv |= (1 << (XC_SYS_PIL - 1));
8010Sstevel@tonic-gate 	set_base_spl();
8020Sstevel@tonic-gate 	affinity_clear();
8030Sstevel@tonic-gate 	return (DDI_SUCCESS);
8040Sstevel@tonic-gate }
8050Sstevel@tonic-gate 
8060Sstevel@tonic-gate static void
8070Sstevel@tonic-gate mp_enable_intr(int cpun)
8080Sstevel@tonic-gate {
8090Sstevel@tonic-gate 	/*
8100Sstevel@tonic-gate 	 * switch to the online cpu
8110Sstevel@tonic-gate 	 */
8120Sstevel@tonic-gate 	affinity_set(cpun);
8130Sstevel@tonic-gate 	/*
8140Sstevel@tonic-gate 	 * clear the interrupt active mask
8150Sstevel@tonic-gate 	 */
8169489SJoe.Bonasera@sun.com 	CPU->cpu_intr_actv &= ~(1 << (XC_SYS_PIL - 1));
8170Sstevel@tonic-gate 	set_base_spl();
8180Sstevel@tonic-gate 	(void) spl0();
8190Sstevel@tonic-gate 	affinity_clear();
8200Sstevel@tonic-gate }
8210Sstevel@tonic-gate 
8220Sstevel@tonic-gate static void
8230Sstevel@tonic-gate mach_get_platform(int owner)
8240Sstevel@tonic-gate {
8250Sstevel@tonic-gate 	void		**srv_opsp;
8260Sstevel@tonic-gate 	void		**clt_opsp;
8270Sstevel@tonic-gate 	int		i;
8280Sstevel@tonic-gate 	int		total_ops;
8290Sstevel@tonic-gate 
8300Sstevel@tonic-gate 	/* fix up psm ops */
8310Sstevel@tonic-gate 	srv_opsp = (void **)mach_set[0];
8320Sstevel@tonic-gate 	clt_opsp = (void **)mach_set[owner];
8330Sstevel@tonic-gate 	if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
8340Sstevel@tonic-gate 		total_ops = sizeof (struct psm_ops_ver01) /
8354481Sbholler 		    sizeof (void (*)(void));
8360Sstevel@tonic-gate 	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
8370Sstevel@tonic-gate 		/* no psm_notify_func */
8380Sstevel@tonic-gate 		total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
8390Sstevel@tonic-gate 		    sizeof (void (*)(void));
8400Sstevel@tonic-gate 	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
8410Sstevel@tonic-gate 		/* no psm_timer funcs */
8420Sstevel@tonic-gate 		total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
8430Sstevel@tonic-gate 		    sizeof (void (*)(void));
8440Sstevel@tonic-gate 	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
8450Sstevel@tonic-gate 		/* no psm_preshutdown function */
8460Sstevel@tonic-gate 		total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
8470Sstevel@tonic-gate 		    sizeof (void (*)(void));
8480Sstevel@tonic-gate 	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
8490Sstevel@tonic-gate 		/* no psm_intr_ops function */
8500Sstevel@tonic-gate 		total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
8510Sstevel@tonic-gate 		    sizeof (void (*)(void));
8520Sstevel@tonic-gate 	else
8530Sstevel@tonic-gate 		total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));
8540Sstevel@tonic-gate 
8550Sstevel@tonic-gate 	/*
8560Sstevel@tonic-gate 	 * Save the version of the PSM module, in case we need to
8579637SRandy.Fishel@Sun.COM 	 * behave differently based on version.
8580Sstevel@tonic-gate 	 */
8590Sstevel@tonic-gate 	mach_ver[0] = mach_ver[owner];
8600Sstevel@tonic-gate 
8610Sstevel@tonic-gate 	for (i = 0; i < total_ops; i++)
8620Sstevel@tonic-gate 		if (clt_opsp[i] != NULL)
8630Sstevel@tonic-gate 			srv_opsp[i] = clt_opsp[i];
8640Sstevel@tonic-gate }
8650Sstevel@tonic-gate 
8660Sstevel@tonic-gate static void
8670Sstevel@tonic-gate mach_construct_info()
8680Sstevel@tonic-gate {
8693446Smrj 	struct psm_sw *swp;
8700Sstevel@tonic-gate 	int	mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
8710Sstevel@tonic-gate 	int	conflict_owner = 0;
8720Sstevel@tonic-gate 
8730Sstevel@tonic-gate 	if (psmsw->psw_forw == psmsw)
8740Sstevel@tonic-gate 		panic("No valid PSM modules found");
8750Sstevel@tonic-gate 	mutex_enter(&psmsw_lock);
8760Sstevel@tonic-gate 	for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
8770Sstevel@tonic-gate 		if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
8780Sstevel@tonic-gate 			continue;
8790Sstevel@tonic-gate 		mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
8800Sstevel@tonic-gate 		mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
8810Sstevel@tonic-gate 		mach_cnt[swp->psw_infop->p_owner]++;
8820Sstevel@tonic-gate 	}
8830Sstevel@tonic-gate 	mutex_exit(&psmsw_lock);
8840Sstevel@tonic-gate 
8850Sstevel@tonic-gate 	mach_get_platform(PSM_OWN_SYS_DEFAULT);
8860Sstevel@tonic-gate 
8870Sstevel@tonic-gate 	/* check to see if there are any conflicts */
8880Sstevel@tonic-gate 	if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
8890Sstevel@tonic-gate 		conflict_owner = PSM_OWN_EXCLUSIVE;
8900Sstevel@tonic-gate 	if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
8910Sstevel@tonic-gate 		conflict_owner = PSM_OWN_OVERRIDE;
8920Sstevel@tonic-gate 	if (conflict_owner) {
8930Sstevel@tonic-gate 		/* remove all psm modules except uppc */
8940Sstevel@tonic-gate 		cmn_err(CE_WARN,
8954481Sbholler 		    "Conflicts detected on the following PSM modules:");
8960Sstevel@tonic-gate 		mutex_enter(&psmsw_lock);
8970Sstevel@tonic-gate 		for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
8980Sstevel@tonic-gate 			if (swp->psw_infop->p_owner == conflict_owner)
8990Sstevel@tonic-gate 				cmn_err(CE_WARN, "%s ",
9004481Sbholler 				    swp->psw_infop->p_mach_idstring);
9010Sstevel@tonic-gate 		}
9020Sstevel@tonic-gate 		mutex_exit(&psmsw_lock);
9030Sstevel@tonic-gate 		cmn_err(CE_WARN,
9044481Sbholler 		    "Setting the system back to SINGLE processor mode!");
9050Sstevel@tonic-gate 		cmn_err(CE_WARN,
9060Sstevel@tonic-gate 		    "Please edit /etc/mach to remove the invalid PSM module.");
9070Sstevel@tonic-gate 		return;
9080Sstevel@tonic-gate 	}
9090Sstevel@tonic-gate 
9100Sstevel@tonic-gate 	if (mach_set[PSM_OWN_EXCLUSIVE])
9110Sstevel@tonic-gate 		mach_get_platform(PSM_OWN_EXCLUSIVE);
9120Sstevel@tonic-gate 
9130Sstevel@tonic-gate 	if (mach_set[PSM_OWN_OVERRIDE])
9140Sstevel@tonic-gate 		mach_get_platform(PSM_OWN_OVERRIDE);
9150Sstevel@tonic-gate }
9160Sstevel@tonic-gate 
9170Sstevel@tonic-gate static void
9180Sstevel@tonic-gate mach_init()
9190Sstevel@tonic-gate {
9203446Smrj 	struct psm_ops  *pops;
9210Sstevel@tonic-gate 
9220Sstevel@tonic-gate 	mach_construct_info();
9230Sstevel@tonic-gate 
9240Sstevel@tonic-gate 	pops = mach_set[0];
9250Sstevel@tonic-gate 
9260Sstevel@tonic-gate 	/* register the interrupt and clock initialization routines */
9270Sstevel@tonic-gate 	picinitf = mach_picinit;
9280Sstevel@tonic-gate 	clkinitf = mach_clkinit;
9290Sstevel@tonic-gate 	psm_get_clockirq = pops->psm_get_clockirq;
9300Sstevel@tonic-gate 
9310Sstevel@tonic-gate 	/* register the interrupt setup code */
9320Sstevel@tonic-gate 	slvltovect = mach_softlvl_to_vect;
9330Sstevel@tonic-gate 	addspl	= pops->psm_addspl;
9340Sstevel@tonic-gate 	delspl	= pops->psm_delspl;
9350Sstevel@tonic-gate 
9360Sstevel@tonic-gate 	if (pops->psm_translate_irq)
9370Sstevel@tonic-gate 		psm_translate_irq = pops->psm_translate_irq;
9380Sstevel@tonic-gate 	if (pops->psm_intr_ops)
9390Sstevel@tonic-gate 		psm_intr_ops = pops->psm_intr_ops;
9403446Smrj 
9413446Smrj #if defined(PSMI_1_2) || defined(PSMI_1_3) || defined(PSMI_1_4)
9423446Smrj 	/*
9433446Smrj 	 * Time-of-day functionality now handled in TOD modules.
9443446Smrj 	 * (Warn about PSM modules that think that we're going to use
9453446Smrj 	 * their ops vectors.)
9463446Smrj 	 */
9473446Smrj 	if (pops->psm_tod_get)
9483446Smrj 		cmn_err(CE_WARN, "obsolete psm_tod_get op %p",
9493446Smrj 		    (void *)pops->psm_tod_get);
9503446Smrj 
9513446Smrj 	if (pops->psm_tod_set)
9523446Smrj 		cmn_err(CE_WARN, "obsolete psm_tod_set op %p",
9533446Smrj 		    (void *)pops->psm_tod_set);
9543446Smrj #endif
9553446Smrj 
9560Sstevel@tonic-gate 	if (pops->psm_notify_error) {
9570Sstevel@tonic-gate 		psm_notify_error = mach_notify_error;
9580Sstevel@tonic-gate 		notify_error = pops->psm_notify_error;
9590Sstevel@tonic-gate 	}
9600Sstevel@tonic-gate 
9610Sstevel@tonic-gate 	(*pops->psm_softinit)();
9620Sstevel@tonic-gate 
9630Sstevel@tonic-gate 	/*
9648906SEric.Saxe@Sun.COM 	 * Initialize the dispatcher's function hooks to enable CPU halting
9658906SEric.Saxe@Sun.COM 	 * when idle.  Set both the deep-idle and non-deep-idle hooks.
9668906SEric.Saxe@Sun.COM 	 *
9678906SEric.Saxe@Sun.COM 	 * Assume we can use the power-saving deep-idle loop cpu_idle_adaptive.
9688906SEric.Saxe@Sun.COM 	 * The platform deep-idle driver will reset our idle loop to
9698906SEric.Saxe@Sun.COM 	 * non_deep_idle_cpu if the power-saving deep-idle feature is not available.
9708906SEric.Saxe@Sun.COM 	 *
9715045Sbholler 	 * Do not use monitor/mwait if idle_cpu_use_hlt is not set (spin idle)
9725045Sbholler 	 * or idle_cpu_prefer_mwait is not set.
9734481Sbholler 	 * Allocate monitor/mwait buffer for cpu0.
9740Sstevel@tonic-gate 	 */
9758906SEric.Saxe@Sun.COM #ifndef __xpv
9768906SEric.Saxe@Sun.COM 	non_deep_idle_disp_enq_thread = disp_enq_thread;
9778906SEric.Saxe@Sun.COM #endif
9784481Sbholler 	if (idle_cpu_use_hlt) {
9798906SEric.Saxe@Sun.COM 		idle_cpu = cpu_idle_adaptive;
9808906SEric.Saxe@Sun.COM 		CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
9815084Sjohnlev #ifndef __xpv
9824481Sbholler 		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait) {
9835045Sbholler 			CPU->cpu_m.mcpu_mwait = cpuid_mwait_alloc(CPU);
9845045Sbholler 			/*
9855045Sbholler 			 * Protect ourself from insane mwait size.
9865045Sbholler 			 */
9875045Sbholler 			if (CPU->cpu_m.mcpu_mwait == NULL) {
9885045Sbholler #ifdef DEBUG
9895045Sbholler 				cmn_err(CE_NOTE, "Using hlt idle.  Cannot "
9905045Sbholler 				    "handle cpu 0 mwait size.");
9915045Sbholler #endif
9925045Sbholler 				idle_cpu_prefer_mwait = 0;
9938906SEric.Saxe@Sun.COM 				CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
9945045Sbholler 			} else {
9958906SEric.Saxe@Sun.COM 				CPU->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
9965045Sbholler 			}
9974481Sbholler 		} else {
9988906SEric.Saxe@Sun.COM 			CPU->cpu_m.mcpu_idle_cpu = cpu_idle;
9998906SEric.Saxe@Sun.COM 		}
10008906SEric.Saxe@Sun.COM 		non_deep_idle_cpu = CPU->cpu_m.mcpu_idle_cpu;
10018906SEric.Saxe@Sun.COM 
10028906SEric.Saxe@Sun.COM 		/*
10038906SEric.Saxe@Sun.COM 		 * Disable power saving deep idle loop?
10048906SEric.Saxe@Sun.COM 		 */
10058906SEric.Saxe@Sun.COM 		if (idle_cpu_no_deep_c) {
10068906SEric.Saxe@Sun.COM 			idle_cpu = non_deep_idle_cpu;
10074481Sbholler 		}
10085084Sjohnlev #endif
10094481Sbholler 	}
10100Sstevel@tonic-gate 
10110Sstevel@tonic-gate 	mach_smpinit();
10120Sstevel@tonic-gate }
10130Sstevel@tonic-gate 
10140Sstevel@tonic-gate static void
10150Sstevel@tonic-gate mach_smpinit(void)
10160Sstevel@tonic-gate {
10172006Sandrei 	struct psm_ops  *pops;
10182006Sandrei 	processorid_t cpu_id;
10192006Sandrei 	int cnt;
10202006Sandrei 	cpuset_t cpumask;
10210Sstevel@tonic-gate 
10220Sstevel@tonic-gate 	pops = mach_set[0];
10236336Sbholler 	CPUSET_ZERO(cpumask);
10240Sstevel@tonic-gate 
10250Sstevel@tonic-gate 	cpu_id = -1;
10260Sstevel@tonic-gate 	cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
10276336Sbholler 	for (cnt = 0; cpu_id != -1; cnt++) {
10282006Sandrei 		CPUSET_ADD(cpumask, cpu_id);
10290Sstevel@tonic-gate 		cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
10300Sstevel@tonic-gate 	}
10310Sstevel@tonic-gate 
10320Sstevel@tonic-gate 	mp_cpus = cpumask;
10330Sstevel@tonic-gate 
10340Sstevel@tonic-gate 	/* MP related routines */
10350Sstevel@tonic-gate 	ap_mlsetup = pops->psm_post_cpu_start;
10360Sstevel@tonic-gate 	send_dirintf = pops->psm_send_ipi;
10370Sstevel@tonic-gate 
10380Sstevel@tonic-gate 	/* optional MP related routines */
10390Sstevel@tonic-gate 	if (pops->psm_shutdown)
10400Sstevel@tonic-gate 		psm_shutdownf = pops->psm_shutdown;
10410Sstevel@tonic-gate 	if (pops->psm_preshutdown)
10420Sstevel@tonic-gate 		psm_preshutdownf = pops->psm_preshutdown;
10430Sstevel@tonic-gate 	if (pops->psm_notify_func)
10440Sstevel@tonic-gate 		psm_notifyf = pops->psm_notify_func;
10450Sstevel@tonic-gate 	if (pops->psm_set_idlecpu)
10460Sstevel@tonic-gate 		psm_set_idle_cpuf = pops->psm_set_idlecpu;
10470Sstevel@tonic-gate 	if (pops->psm_unset_idlecpu)
10480Sstevel@tonic-gate 		psm_unset_idle_cpuf = pops->psm_unset_idlecpu;
10490Sstevel@tonic-gate 
10500Sstevel@tonic-gate 	psm_clkinit = pops->psm_clkinit;
10510Sstevel@tonic-gate 
10520Sstevel@tonic-gate 	if (pops->psm_timer_reprogram)
10530Sstevel@tonic-gate 		psm_timer_reprogram = pops->psm_timer_reprogram;
10540Sstevel@tonic-gate 
10550Sstevel@tonic-gate 	if (pops->psm_timer_enable)
10560Sstevel@tonic-gate 		psm_timer_enable = pops->psm_timer_enable;
10570Sstevel@tonic-gate 
10580Sstevel@tonic-gate 	if (pops->psm_timer_disable)
10590Sstevel@tonic-gate 		psm_timer_disable = pops->psm_timer_disable;
10600Sstevel@tonic-gate 
10610Sstevel@tonic-gate 	if (pops->psm_post_cyclic_setup)
10620Sstevel@tonic-gate 		psm_post_cyclic_setup = pops->psm_post_cyclic_setup;
10630Sstevel@tonic-gate 
10645295Srandyf 	if (pops->psm_state)
10655295Srandyf 		psm_state = pops->psm_state;
10665295Srandyf 
10677113Sbholler 	/*
10687113Sbholler 	 * Set these vectors here so they can be used by Suspend/Resume
10697113Sbholler 	 * on UP machines.
10707113Sbholler 	 */
10717113Sbholler 	if (pops->psm_disable_intr)
10727113Sbholler 		psm_disable_intr = pops->psm_disable_intr;
10737113Sbholler 	if (pops->psm_enable_intr)
10747113Sbholler 		psm_enable_intr  = pops->psm_enable_intr;
10757113Sbholler 
10767113Sbholler 	/* check for multiple CPUs */
10770Sstevel@tonic-gate 	if (cnt < 2)
10780Sstevel@tonic-gate 		return;
10790Sstevel@tonic-gate 
10800Sstevel@tonic-gate 	/* check for MP platforms */
10810Sstevel@tonic-gate 	if (pops->psm_cpu_start == NULL)
10820Sstevel@tonic-gate 		return;
10830Sstevel@tonic-gate 
10840Sstevel@tonic-gate 	/*
10850Sstevel@tonic-gate 	 * Set the dispatcher hook to enable cpu "wake up"
10860Sstevel@tonic-gate 	 * when a thread becomes runnable.
10870Sstevel@tonic-gate 	 */
10885084Sjohnlev 	if (idle_cpu_use_hlt) {
10895084Sjohnlev 		disp_enq_thread = cpu_wakeup;
10905084Sjohnlev #ifndef __xpv
10914481Sbholler 		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait)
10924481Sbholler 			disp_enq_thread = cpu_wakeup_mwait;
10938906SEric.Saxe@Sun.COM 		non_deep_idle_disp_enq_thread = disp_enq_thread;
10945084Sjohnlev #endif
10955084Sjohnlev 	}
10960Sstevel@tonic-gate 
10970Sstevel@tonic-gate 	psm_get_ipivect = pops->psm_get_ipivect;
10980Sstevel@tonic-gate 
10999489SJoe.Bonasera@sun.com 	(void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_intr",
11004481Sbholler 	    (*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI),
11019489SJoe.Bonasera@sun.com 	    NULL, NULL, NULL, NULL);
11020Sstevel@tonic-gate 
11030Sstevel@tonic-gate 	(void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE);
11040Sstevel@tonic-gate }
11050Sstevel@tonic-gate 
11060Sstevel@tonic-gate static void
11070Sstevel@tonic-gate mach_picinit()
11080Sstevel@tonic-gate {
11092006Sandrei 	struct psm_ops  *pops;
11100Sstevel@tonic-gate 
11110Sstevel@tonic-gate 	pops = mach_set[0];
11120Sstevel@tonic-gate 
11130Sstevel@tonic-gate 	/* register the interrupt handlers */
11140Sstevel@tonic-gate 	setlvl = pops->psm_intr_enter;
11150Sstevel@tonic-gate 	setlvlx = pops->psm_intr_exit;
11160Sstevel@tonic-gate 
11170Sstevel@tonic-gate 	/* initialize the interrupt hardware */
11180Sstevel@tonic-gate 	(*pops->psm_picinit)();
11190Sstevel@tonic-gate 
11200Sstevel@tonic-gate 	/* set interrupt mask for current ipl */
11210Sstevel@tonic-gate 	setspl = pops->psm_setspl;
11223446Smrj 	cli();
11230Sstevel@tonic-gate 	setspl(CPU->cpu_pri);
11240Sstevel@tonic-gate }
11250Sstevel@tonic-gate 
11260Sstevel@tonic-gate uint_t	cpu_freq;	/* MHz */
11270Sstevel@tonic-gate uint64_t cpu_freq_hz;	/* measured (in hertz) */
11280Sstevel@tonic-gate 
11290Sstevel@tonic-gate #define	MEGA_HZ		1000000
11300Sstevel@tonic-gate 
11315084Sjohnlev #ifdef __xpv
11325084Sjohnlev 
11335084Sjohnlev int xpv_cpufreq_workaround = 1;
11345084Sjohnlev int xpv_cpufreq_verbose = 0;
11355084Sjohnlev 
11365084Sjohnlev #else	/* __xpv */
11375084Sjohnlev 
11380Sstevel@tonic-gate static uint64_t
11390Sstevel@tonic-gate mach_calchz(uint32_t pit_counter, uint64_t *processor_clks)
11400Sstevel@tonic-gate {
11410Sstevel@tonic-gate 	uint64_t cpu_hz;
11420Sstevel@tonic-gate 
11430Sstevel@tonic-gate 	if ((pit_counter == 0) || (*processor_clks == 0) ||
11440Sstevel@tonic-gate 	    (*processor_clks > (((uint64_t)-1) / PIT_HZ)))
11450Sstevel@tonic-gate 		return (0);
11460Sstevel@tonic-gate 
11470Sstevel@tonic-gate 	cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter;
11480Sstevel@tonic-gate 
11490Sstevel@tonic-gate 	return (cpu_hz);
11500Sstevel@tonic-gate }
11510Sstevel@tonic-gate 
11525084Sjohnlev #endif	/* __xpv */
11535084Sjohnlev 
11540Sstevel@tonic-gate static uint64_t
11550Sstevel@tonic-gate mach_getcpufreq(void)
11560Sstevel@tonic-gate {
11575084Sjohnlev #if defined(__xpv)
11585084Sjohnlev 	vcpu_time_info_t *vti = &CPU->cpu_m.mcpu_vcpu_info->time;
11595084Sjohnlev 	uint64_t cpu_hz;
11605084Sjohnlev 
11615084Sjohnlev 	/*
11625084Sjohnlev 	 * During dom0 bringup, it was noted that on at least one older
11635084Sjohnlev 	 * Intel HT machine, the hypervisor initially gives a tsc_to_system_mul
11645084Sjohnlev 	 * value that is quite wrong (the 3.06GHz clock was reported
11655084Sjohnlev 	 * as 4.77GHz).
11665084Sjohnlev 	 *
11675084Sjohnlev 	 * The curious thing is that if you stop the kernel at entry,
11685084Sjohnlev 	 * breakpoint here and inspect the value with kmdb, the value
11695084Sjohnlev 	 * is correct - but if you don't stop and simply enable the
11705084Sjohnlev 	 * printf statement (below), you can see the bad value printed
11715084Sjohnlev 	 * here.  Almost as if something kmdb did caused the hypervisor to
11725084Sjohnlev 	 * figure it out correctly.  And, note that the hypervisor
11735084Sjohnlev 	 * eventually -does- figure it out correctly ... if you look at
11745084Sjohnlev 	 * the field later in the life of dom0, it is correct.
11755084Sjohnlev 	 *
11765084Sjohnlev 	 * For now, on dom0, we employ a slightly cheesy workaround of
11775084Sjohnlev 	 * using the DOM0_PHYSINFO hypercall.
11785084Sjohnlev 	 */
11795084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info) && xpv_cpufreq_workaround) {
11805084Sjohnlev 		xen_sysctl_t op0, *op = &op0;
11815084Sjohnlev 
11825084Sjohnlev 		op->cmd = XEN_SYSCTL_physinfo;
11835084Sjohnlev 		op->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
11845084Sjohnlev 		if (HYPERVISOR_sysctl(op) != 0)
11855084Sjohnlev 			panic("physinfo op refused");
11865084Sjohnlev 
11875084Sjohnlev 		cpu_hz = 1000 * (uint64_t)op->u.physinfo.cpu_khz;
11885084Sjohnlev 	} else {
11895084Sjohnlev 		cpu_hz = (UINT64_C(1000000000) << 32) / vti->tsc_to_system_mul;
11905084Sjohnlev 
11915084Sjohnlev 		if (vti->tsc_shift < 0)
11925084Sjohnlev 			cpu_hz <<= -vti->tsc_shift;
11935084Sjohnlev 		else
11945084Sjohnlev 			cpu_hz >>= vti->tsc_shift;
11955084Sjohnlev 	}
11965084Sjohnlev 
11975084Sjohnlev 	if (xpv_cpufreq_verbose)
11985084Sjohnlev 		printf("mach_getcpufreq: system_mul 0x%x, shift %d, "
11995084Sjohnlev 		    "cpu_hz %" PRId64 "Hz\n",
12005084Sjohnlev 		    vti->tsc_to_system_mul, vti->tsc_shift, cpu_hz);
12015084Sjohnlev 
12025084Sjohnlev 	return (cpu_hz);
12035084Sjohnlev #else	/* __xpv */
12040Sstevel@tonic-gate 	uint32_t pit_counter;
12050Sstevel@tonic-gate 	uint64_t processor_clks;
12060Sstevel@tonic-gate 
12070Sstevel@tonic-gate 	if (x86_feature & X86_TSC) {
12080Sstevel@tonic-gate 		/*
12090Sstevel@tonic-gate 		 * We have a TSC. freq_tsc() knows how to measure the number
12100Sstevel@tonic-gate 		 * of clock cycles sampled against the PIT.
12110Sstevel@tonic-gate 		 */
12123446Smrj 		ulong_t flags = clear_int_flag();
12130Sstevel@tonic-gate 		processor_clks = freq_tsc(&pit_counter);
12143446Smrj 		restore_int_flag(flags);
12150Sstevel@tonic-gate 		return (mach_calchz(pit_counter, &processor_clks));
12160Sstevel@tonic-gate 	} else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) {
12170Sstevel@tonic-gate #if defined(__amd64)
12180Sstevel@tonic-gate 		panic("mach_getcpufreq: no TSC!");
12190Sstevel@tonic-gate #elif defined(__i386)
12200Sstevel@tonic-gate 		/*
12210Sstevel@tonic-gate 		 * We are a Cyrix based on a 6x86 core or an Intel Pentium
12220Sstevel@tonic-gate 		 * for which freq_notsc() knows how to measure the number of
12230Sstevel@tonic-gate 		 * elapsed clock cycles sampled against the PIT
12240Sstevel@tonic-gate 		 */
12253446Smrj 		ulong_t flags = clear_int_flag();
12260Sstevel@tonic-gate 		processor_clks = freq_notsc(&pit_counter);
12273446Smrj 		restore_int_flag(flags);
12280Sstevel@tonic-gate 		return (mach_calchz(pit_counter, &processor_clks));
12290Sstevel@tonic-gate #endif	/* __i386 */
12300Sstevel@tonic-gate 	}
12310Sstevel@tonic-gate 
12320Sstevel@tonic-gate 	/* We do not know how to calculate cpu frequency for this cpu. */
12330Sstevel@tonic-gate 	return (0);
12345084Sjohnlev #endif	/* __xpv */
12350Sstevel@tonic-gate }
12360Sstevel@tonic-gate 
12370Sstevel@tonic-gate /*
12380Sstevel@tonic-gate  * If the clock speed of a cpu is found to be reported incorrectly, do not add
12390Sstevel@tonic-gate  * to this array, instead improve the accuracy of the algorithm that determines
12400Sstevel@tonic-gate  * the clock speed of the processor or extend the implementation to support the
12410Sstevel@tonic-gate  * vendor as appropriate. This is here only to support adjusting the speed on
12420Sstevel@tonic-gate  * older, slower processors that mach_fixcpufreq() would not be able to account
12430Sstevel@tonic-gate  * for otherwise.
12440Sstevel@tonic-gate  */
12450Sstevel@tonic-gate static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 };
12460Sstevel@tonic-gate 
12470Sstevel@tonic-gate /*
12480Sstevel@tonic-gate  * On fast processors the clock frequency that is measured may be off by
12490Sstevel@tonic-gate  * a few MHz from the value printed on the part. This is a combination of
12500Sstevel@tonic-gate  * two factors: for such fast parts being off by this much is within the
12510Sstevel@tonic-gate  * manufacturing tolerances, and the measurement itself is difficult enough
12520Sstevel@tonic-gate  * to introduce a small error. This function uses some
12530Sstevel@tonic-gate  * heuristics in order to tweak the value that was measured to match what
12540Sstevel@tonic-gate  * is most likely printed on the part.
12550Sstevel@tonic-gate  *
12560Sstevel@tonic-gate  * Some examples:
12570Sstevel@tonic-gate  * 	AMD Athlon 1000 MHz measured as 998 MHz
12580Sstevel@tonic-gate  * 	Intel Pentium III Xeon 733 MHz measured as 731 MHz
12590Sstevel@tonic-gate  * 	Intel Pentium IV 1500 MHz measured as 1495 MHz
12600Sstevel@tonic-gate  *
12610Sstevel@tonic-gate  * If in the future this function is no longer sufficient to correct
12620Sstevel@tonic-gate  * for the error in the measurement, then the algorithm used to perform
12630Sstevel@tonic-gate  * the measurement will have to be improved in order to increase accuracy
12640Sstevel@tonic-gate  * rather than adding horrible and questionable kludges here.
12650Sstevel@tonic-gate  *
12660Sstevel@tonic-gate  * This is called after the cyclics subsystem because of the potential
12670Sstevel@tonic-gate  * that the heuristics within may give a worse estimate of the clock
12680Sstevel@tonic-gate  * frequency than the value that was measured.
12690Sstevel@tonic-gate  */
12700Sstevel@tonic-gate static void
12710Sstevel@tonic-gate mach_fixcpufreq(void)
12720Sstevel@tonic-gate {
12730Sstevel@tonic-gate 	uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;
12740Sstevel@tonic-gate 
12750Sstevel@tonic-gate 	freq = (uint32_t)cpu_freq;
12760Sstevel@tonic-gate 
12770Sstevel@tonic-gate 	/*
12780Sstevel@tonic-gate 	 * Find the nearest integer multiple of 200/3 (about 66) MHz to the
12790Sstevel@tonic-gate 	 * measured speed, taking into account that the 667 MHz parts were
12800Sstevel@tonic-gate 	 * the first to round up.
12810Sstevel@tonic-gate 	 */
12820Sstevel@tonic-gate 	mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
12830Sstevel@tonic-gate 	near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
12840Sstevel@tonic-gate 	delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);
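	/*
	 * Worked example with an assumed measurement: for freq = 731 (the
	 * Pentium III Xeon case above), mul = (3*731 + 100) / 200 = 11, and
	 * because mul >= 10 the extra +1 rounds up, giving
	 * near66 = (200*11 + 1) / 3 = 733 and delta66 = 2.  The 50 MHz step
	 * below yields near50 = 750 and delta50 = 19, so 733 wins.
	 */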
12850Sstevel@tonic-gate 
12860Sstevel@tonic-gate 	/* Find the nearest integer multiple of 50 MHz to the measured speed */
12870Sstevel@tonic-gate 	mul = (freq + 25) / 50;
12880Sstevel@tonic-gate 	near50 = mul * 50;
12890Sstevel@tonic-gate 	delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);
12900Sstevel@tonic-gate 
12910Sstevel@tonic-gate 	/* Find the closer of the two */
12920Sstevel@tonic-gate 	if (delta66 < delta50) {
12930Sstevel@tonic-gate 		fixed = near66;
12940Sstevel@tonic-gate 		delta = delta66;
12950Sstevel@tonic-gate 	} else {
12960Sstevel@tonic-gate 		fixed = near50;
12970Sstevel@tonic-gate 		delta = delta50;
12980Sstevel@tonic-gate 	}
12990Sstevel@tonic-gate 
13000Sstevel@tonic-gate 	if (fixed > INT_MAX)
13010Sstevel@tonic-gate 		return;
13020Sstevel@tonic-gate 
13030Sstevel@tonic-gate 	/*
13040Sstevel@tonic-gate 	 * Some older parts have a core clock frequency that is not an
13050Sstevel@tonic-gate 	 * integral multiple of 50 or 66 MHz. Check if one of the old
13060Sstevel@tonic-gate 	 * clock frequencies is closer to the measured value than any
13070Sstevel@tonic-gate 	 * of the integral multiples of 50 and 66, and if so set fixed
13080Sstevel@tonic-gate 	 * and delta appropriately to represent the closest value.
13090Sstevel@tonic-gate 	 */
13100Sstevel@tonic-gate 	i = sizeof (x86_cpu_freq) / sizeof (int);
13110Sstevel@tonic-gate 	while (i > 0) {
13120Sstevel@tonic-gate 		i--;
13130Sstevel@tonic-gate 
13140Sstevel@tonic-gate 		if (x86_cpu_freq[i] <= freq) {
13150Sstevel@tonic-gate 			mul = freq - x86_cpu_freq[i];
13160Sstevel@tonic-gate 
13170Sstevel@tonic-gate 			if (mul < delta) {
13180Sstevel@tonic-gate 				fixed = x86_cpu_freq[i];
13190Sstevel@tonic-gate 				delta = mul;
13200Sstevel@tonic-gate 			}
13210Sstevel@tonic-gate 
13220Sstevel@tonic-gate 			break;
13230Sstevel@tonic-gate 		}
13240Sstevel@tonic-gate 
13250Sstevel@tonic-gate 		mul = x86_cpu_freq[i] - freq;
13260Sstevel@tonic-gate 
13270Sstevel@tonic-gate 		if (mul < delta) {
13280Sstevel@tonic-gate 			fixed = x86_cpu_freq[i];
13290Sstevel@tonic-gate 			delta = mul;
13300Sstevel@tonic-gate 		}
13310Sstevel@tonic-gate 	}
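	/*
	 * Assumed example of the table search above: a 166 MHz part measured
	 * as freq = 163 gets near66 = 133 (delta 30) and near50 = 150
	 * (delta 13) from the steps above.  Scanning the table from its
	 * highest entry down, 175 improves delta to 12 and 166 improves it
	 * to 3; the loop then breaks at 160, the first entry <= freq, which
	 * is no closer.  Since the final delta is <= 6, cpu_freq becomes
	 * 166 below.
	 */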
13320Sstevel@tonic-gate 
13330Sstevel@tonic-gate 	/*
13340Sstevel@tonic-gate 	 * Set a reasonable maximum for how much to correct the measured
13350Sstevel@tonic-gate 	 * result by. This check is here to prevent the adjustment made
13360Sstevel@tonic-gate 	 * by this function from being more harm than good. It is entirely
13370Sstevel@tonic-gate 	 * possible that in the future parts will be made that are not
13380Sstevel@tonic-gate 	 * integral multiples of 66 or 50 in clock frequency or that
13390Sstevel@tonic-gate 	 * someone may overclock a part to some odd frequency. If the
13400Sstevel@tonic-gate 	 * measured value is farther from the corrected value than
13410Sstevel@tonic-gate 	 * allowed, then assume the corrected value is in error and use
13420Sstevel@tonic-gate 	 * the measured value.
13430Sstevel@tonic-gate 	 */
13440Sstevel@tonic-gate 	if (6 < delta)
13450Sstevel@tonic-gate 		return;
13460Sstevel@tonic-gate 
13470Sstevel@tonic-gate 	cpu_freq = (int)fixed;
13480Sstevel@tonic-gate }
13490Sstevel@tonic-gate 
13500Sstevel@tonic-gate 
13510Sstevel@tonic-gate static int
13520Sstevel@tonic-gate machhztomhz(uint64_t cpu_freq_hz)
13530Sstevel@tonic-gate {
13540Sstevel@tonic-gate 	uint64_t cpu_mhz;
13550Sstevel@tonic-gate 
13560Sstevel@tonic-gate 	/* Round to nearest MHZ */
13570Sstevel@tonic-gate 	cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;
13580Sstevel@tonic-gate 
13590Sstevel@tonic-gate 	if (cpu_mhz > INT_MAX)
13600Sstevel@tonic-gate 		return (0);
13610Sstevel@tonic-gate 
13620Sstevel@tonic-gate 	return ((int)cpu_mhz);
13630Sstevel@tonic-gate 
13640Sstevel@tonic-gate }
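/*
 * A quick example of the rounding above, with assumed inputs: a measured
 * cpu_freq_hz of 2399623456 becomes (2399623456 + 500000) / 1000000 =
 * 2400 MHz, while 2399123456 rounds down to 2399 MHz.
 */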
13650Sstevel@tonic-gate 
13660Sstevel@tonic-gate 
13670Sstevel@tonic-gate static int
13680Sstevel@tonic-gate mach_clkinit(int preferred_mode, int *set_mode)
13690Sstevel@tonic-gate {
13703446Smrj 	struct psm_ops  *pops;
13710Sstevel@tonic-gate 	int resolution;
13720Sstevel@tonic-gate 
13730Sstevel@tonic-gate 	pops = mach_set[0];
13740Sstevel@tonic-gate 
13750Sstevel@tonic-gate 	cpu_freq_hz = mach_getcpufreq();
13760Sstevel@tonic-gate 
13770Sstevel@tonic-gate 	cpu_freq = machhztomhz(cpu_freq_hz);
13780Sstevel@tonic-gate 
13790Sstevel@tonic-gate 	if (!(x86_feature & X86_TSC) || (cpu_freq == 0))
13800Sstevel@tonic-gate 		tsc_gethrtime_enable = 0;
13810Sstevel@tonic-gate 
13825084Sjohnlev #ifndef __xpv
13830Sstevel@tonic-gate 	if (tsc_gethrtime_enable) {
13840Sstevel@tonic-gate 		tsc_hrtimeinit(cpu_freq_hz);
13855084Sjohnlev 	} else
13865084Sjohnlev #endif
13875084Sjohnlev 	{
13880Sstevel@tonic-gate 		if (pops->psm_hrtimeinit)
13890Sstevel@tonic-gate 			(*pops->psm_hrtimeinit)();
13900Sstevel@tonic-gate 		gethrtimef = pops->psm_gethrtime;
13910Sstevel@tonic-gate 		gethrtimeunscaledf = gethrtimef;
13920Sstevel@tonic-gate 		/* scalehrtimef will remain dummy */
13930Sstevel@tonic-gate 	}
13940Sstevel@tonic-gate 
13950Sstevel@tonic-gate 	mach_fixcpufreq();
13960Sstevel@tonic-gate 
13970Sstevel@tonic-gate 	if (mach_ver[0] >= PSM_INFO_VER01_3) {
13985084Sjohnlev 		if (preferred_mode == TIMER_ONESHOT) {
13990Sstevel@tonic-gate 
14000Sstevel@tonic-gate 			resolution = (*pops->psm_clkinit)(0);
14010Sstevel@tonic-gate 			if (resolution != 0)  {
14020Sstevel@tonic-gate 				*set_mode = TIMER_ONESHOT;
14030Sstevel@tonic-gate 				return (resolution);
14040Sstevel@tonic-gate 			}
14050Sstevel@tonic-gate 		}
14060Sstevel@tonic-gate 
14070Sstevel@tonic-gate 		/*
14080Sstevel@tonic-gate 		 * either periodic mode was requested or one-shot mode
14090Sstevel@tonic-gate 		 * could not be set
14100Sstevel@tonic-gate 		 */
14110Sstevel@tonic-gate 		resolution = (*pops->psm_clkinit)(hz);
14120Sstevel@tonic-gate 		/*
14130Sstevel@tonic-gate 		 * the psm should be able to do periodic mode, so we do not
14140Sstevel@tonic-gate 		 * check the return value of psm_clkinit here.
14150Sstevel@tonic-gate 		 */
14160Sstevel@tonic-gate 		*set_mode = TIMER_PERIODIC;
14170Sstevel@tonic-gate 		return (resolution);
14180Sstevel@tonic-gate 	} else {
14190Sstevel@tonic-gate 		/*
14200Sstevel@tonic-gate 		 * PSMI interface prior to PSMI_3 does not define a return
14210Sstevel@tonic-gate 		 * value for psm_clkinit, so the return value is ignored.
14220Sstevel@tonic-gate 		 */
14230Sstevel@tonic-gate 		(void) (*pops->psm_clkinit)(hz);
14240Sstevel@tonic-gate 		*set_mode = TIMER_PERIODIC;
14250Sstevel@tonic-gate 		return (nsec_per_tick);
14260Sstevel@tonic-gate 	}
14270Sstevel@tonic-gate }
14280Sstevel@tonic-gate 
14294652Scwb 
1430999Slq150181 /*ARGSUSED*/
14310Sstevel@tonic-gate static int
14323446Smrj mach_softlvl_to_vect(int ipl)
14330Sstevel@tonic-gate {
14344652Scwb 	setsoftint = av_set_softint_pending;
14354652Scwb 	kdisetsoftint = kdi_av_set_softint_pending;
14360Sstevel@tonic-gate 
14370Sstevel@tonic-gate 	return (PSM_SV_SOFTWARE);
14380Sstevel@tonic-gate }
14390Sstevel@tonic-gate 
14403446Smrj #ifdef DEBUG
14413446Smrj /*
14423446Smrj  * This is here to allow us to simulate cpus that refuse to start.
14433446Smrj  */
14443446Smrj cpuset_t cpufailset;
14453446Smrj #endif
14463446Smrj 
14473446Smrj int
14483446Smrj mach_cpu_start(struct cpu *cp, void *ctx)
14490Sstevel@tonic-gate {
14503446Smrj 	struct psm_ops *pops = mach_set[0];
14513446Smrj 	processorid_t id = cp->cpu_id;
14520Sstevel@tonic-gate 
14533446Smrj #ifdef DEBUG
14543446Smrj 	if (CPU_IN_SET(cpufailset, id))
14553446Smrj 		return (0);
14563446Smrj #endif
14573446Smrj 	return ((*pops->psm_cpu_start)(id, ctx));
14580Sstevel@tonic-gate }
14590Sstevel@tonic-gate 
14605295Srandyf int
14615295Srandyf mach_cpuid_start(processorid_t id, void *ctx)
14625295Srandyf {
14635295Srandyf 	struct psm_ops *pops = mach_set[0];
14645295Srandyf 
14655295Srandyf #ifdef DEBUG
14665295Srandyf 	if (CPU_IN_SET(cpufailset, id))
14675295Srandyf 		return (0);
14685295Srandyf #endif
14695295Srandyf 	return ((*pops->psm_cpu_start)(id, ctx));
14705295Srandyf }
14715295Srandyf 
1472*9652SMichael.Corcoran@Sun.COM /*
1473*9652SMichael.Corcoran@Sun.COM  * Default handler to create a device node for a CPU.
1474*9652SMichael.Corcoran@Sun.COM  * One reference count will be held on the created device node.
1475*9652SMichael.Corcoran@Sun.COM  */
1476*9652SMichael.Corcoran@Sun.COM static int
1477*9652SMichael.Corcoran@Sun.COM mach_cpu_create_devinfo(cpu_t *cp, dev_info_t **dipp)
1478*9652SMichael.Corcoran@Sun.COM {
1479*9652SMichael.Corcoran@Sun.COM 	int rv, circ;
1480*9652SMichael.Corcoran@Sun.COM 	dev_info_t *dip;
1481*9652SMichael.Corcoran@Sun.COM 	static kmutex_t cpu_node_lock;
1482*9652SMichael.Corcoran@Sun.COM 	static dev_info_t *cpu_nex_devi = NULL;
1483*9652SMichael.Corcoran@Sun.COM 
1484*9652SMichael.Corcoran@Sun.COM 	ASSERT(cp != NULL);
1485*9652SMichael.Corcoran@Sun.COM 	ASSERT(dipp != NULL);
1486*9652SMichael.Corcoran@Sun.COM 	*dipp = NULL;
1487*9652SMichael.Corcoran@Sun.COM 
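	/*
	 * The "cpus" nexus node is looked up (or created) only once and
	 * cached in cpu_nex_devi; cpu_node_lock serializes that one-time
	 * creation so concurrent callers cannot race to allocate two
	 * nexus nodes.
	 */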
1488*9652SMichael.Corcoran@Sun.COM 	if (cpu_nex_devi == NULL) {
1489*9652SMichael.Corcoran@Sun.COM 		mutex_enter(&cpu_node_lock);
1490*9652SMichael.Corcoran@Sun.COM 		/* First check whether the cpus nexus node exists. */
1491*9652SMichael.Corcoran@Sun.COM 		cpu_nex_devi = ddi_find_devinfo("cpus", -1, 0);
1492*9652SMichael.Corcoran@Sun.COM 		/* Create the cpus node if it doesn't exist. */
1493*9652SMichael.Corcoran@Sun.COM 		if (cpu_nex_devi == NULL) {
1494*9652SMichael.Corcoran@Sun.COM 			ndi_devi_enter(ddi_root_node(), &circ);
1495*9652SMichael.Corcoran@Sun.COM 			rv = ndi_devi_alloc(ddi_root_node(), "cpus",
1496*9652SMichael.Corcoran@Sun.COM 			    (pnode_t)DEVI_SID_NODEID, &dip);
1497*9652SMichael.Corcoran@Sun.COM 			if (rv != NDI_SUCCESS) {
1498*9652SMichael.Corcoran@Sun.COM 				mutex_exit(&cpu_node_lock);
1499*9652SMichael.Corcoran@Sun.COM 				cmn_err(CE_CONT,
1500*9652SMichael.Corcoran@Sun.COM 				    "?failed to create cpu nexus device.\n");
1501*9652SMichael.Corcoran@Sun.COM 				return (PSM_FAILURE);
1502*9652SMichael.Corcoran@Sun.COM 			}
1503*9652SMichael.Corcoran@Sun.COM 			ASSERT(dip != NULL);
1504*9652SMichael.Corcoran@Sun.COM 			(void) ndi_devi_online(dip, 0);
1505*9652SMichael.Corcoran@Sun.COM 			ndi_devi_exit(ddi_root_node(), circ);
1506*9652SMichael.Corcoran@Sun.COM 			cpu_nex_devi = dip;
1507*9652SMichael.Corcoran@Sun.COM 		}
1508*9652SMichael.Corcoran@Sun.COM 		mutex_exit(&cpu_node_lock);
1509*9652SMichael.Corcoran@Sun.COM 	}
1510*9652SMichael.Corcoran@Sun.COM 
1511*9652SMichael.Corcoran@Sun.COM 	/*
1512*9652SMichael.Corcoran@Sun.COM 	 * Create a child node for the cpu identified by 'cpu_id'.
1513*9652SMichael.Corcoran@Sun.COM 	 */
1514*9652SMichael.Corcoran@Sun.COM 	ndi_devi_enter(cpu_nex_devi, &circ);
1515*9652SMichael.Corcoran@Sun.COM 	dip = ddi_add_child(cpu_nex_devi, "cpu", DEVI_SID_NODEID, cp->cpu_id);
1516*9652SMichael.Corcoran@Sun.COM 	if (dip == NULL) {
1517*9652SMichael.Corcoran@Sun.COM 		cmn_err(CE_CONT,
1518*9652SMichael.Corcoran@Sun.COM 		    "?failed to create device node for cpu%d.\n", cp->cpu_id);
1519*9652SMichael.Corcoran@Sun.COM 		rv = PSM_FAILURE;
1520*9652SMichael.Corcoran@Sun.COM 	} else {
1521*9652SMichael.Corcoran@Sun.COM 		*dipp = dip;
1522*9652SMichael.Corcoran@Sun.COM 		(void) ndi_hold_devi(dip);
1523*9652SMichael.Corcoran@Sun.COM 		rv = PSM_SUCCESS;
1524*9652SMichael.Corcoran@Sun.COM 	}
1525*9652SMichael.Corcoran@Sun.COM 	ndi_devi_exit(cpu_nex_devi, circ);
1526*9652SMichael.Corcoran@Sun.COM 
1527*9652SMichael.Corcoran@Sun.COM 	return (rv);
1528*9652SMichael.Corcoran@Sun.COM }
1529*9652SMichael.Corcoran@Sun.COM 
1530*9652SMichael.Corcoran@Sun.COM /*
1531*9652SMichael.Corcoran@Sun.COM  * Create a cpu device node in the device tree and online it.
1532*9652SMichael.Corcoran@Sun.COM  * Return the created dip with a reference count held if requested.
1533*9652SMichael.Corcoran@Sun.COM  */
1534*9652SMichael.Corcoran@Sun.COM int
1535*9652SMichael.Corcoran@Sun.COM mach_cpu_create_device_node(struct cpu *cp, dev_info_t **dipp)
1536*9652SMichael.Corcoran@Sun.COM {
1537*9652SMichael.Corcoran@Sun.COM 	int rv;
1538*9652SMichael.Corcoran@Sun.COM 	dev_info_t *dip = NULL;
1539*9652SMichael.Corcoran@Sun.COM 
1540*9652SMichael.Corcoran@Sun.COM 	ASSERT(psm_cpu_create_devinfo != NULL);
1541*9652SMichael.Corcoran@Sun.COM 	rv = psm_cpu_create_devinfo(cp, &dip);
1542*9652SMichael.Corcoran@Sun.COM 	if (rv == PSM_SUCCESS) {
1543*9652SMichael.Corcoran@Sun.COM 		cpuid_set_cpu_properties(dip, cp->cpu_id, cp->cpu_m.mcpu_cpi);
1544*9652SMichael.Corcoran@Sun.COM 		/* Recursively attach driver for parent nexus device. */
1545*9652SMichael.Corcoran@Sun.COM 		if (i_ddi_attach_node_hierarchy(ddi_get_parent(dip)) ==
1546*9652SMichael.Corcoran@Sun.COM 		    DDI_SUCCESS) {
1547*9652SMichael.Corcoran@Sun.COM 			/* Configure cpu itself and descendants. */
1548*9652SMichael.Corcoran@Sun.COM 			(void) ndi_devi_online(dip,
1549*9652SMichael.Corcoran@Sun.COM 			    NDI_ONLINE_ATTACH | NDI_CONFIG);
1550*9652SMichael.Corcoran@Sun.COM 		}
1551*9652SMichael.Corcoran@Sun.COM 		if (dipp != NULL) {
1552*9652SMichael.Corcoran@Sun.COM 			*dipp = dip;
1553*9652SMichael.Corcoran@Sun.COM 		} else {
1554*9652SMichael.Corcoran@Sun.COM 			(void) ndi_rele_devi(dip);
1555*9652SMichael.Corcoran@Sun.COM 		}
1556*9652SMichael.Corcoran@Sun.COM 	}
1557*9652SMichael.Corcoran@Sun.COM 
1558*9652SMichael.Corcoran@Sun.COM 	return (rv);
1559*9652SMichael.Corcoran@Sun.COM }
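/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * a CPU configuration path that needs the dip could do
 *
 *	dev_info_t *dip;
 *
 *	if (mach_cpu_create_device_node(cp, &dip) == PSM_SUCCESS) {
 *		... use dip ...
 *		(void) ndi_rele_devi(dip);
 *	}
 *
 * Passing a NULL dipp instead lets the routine above drop the reference
 * itself.
 */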
1560*9652SMichael.Corcoran@Sun.COM 
15610Sstevel@tonic-gate /*ARGSUSED*/
15620Sstevel@tonic-gate static int
15630Sstevel@tonic-gate mach_translate_irq(dev_info_t *dip, int irqno)
15640Sstevel@tonic-gate {
15650Sstevel@tonic-gate 	return (irqno);	/* default to NO translation */
15660Sstevel@tonic-gate }
15670Sstevel@tonic-gate 
15680Sstevel@tonic-gate static void
15690Sstevel@tonic-gate mach_notify_error(int level, char *errmsg)
15700Sstevel@tonic-gate {
15710Sstevel@tonic-gate 	/*
15720Sstevel@tonic-gate 	 * SL_FATAL is passed in once panicstr is set; deliver it
15730Sstevel@tonic-gate 	 * as CE_PANIC.  Also, translate SL_ codes back to CE_
15740Sstevel@tonic-gate 	 * codes for the psmi handler.
15750Sstevel@tonic-gate 	 */
15760Sstevel@tonic-gate 	if (level & SL_FATAL)
15770Sstevel@tonic-gate 		(*notify_error)(CE_PANIC, errmsg);
15780Sstevel@tonic-gate 	else if (level & SL_WARN)
15790Sstevel@tonic-gate 		(*notify_error)(CE_WARN, errmsg);
15800Sstevel@tonic-gate 	else if (level & SL_NOTE)
15810Sstevel@tonic-gate 		(*notify_error)(CE_NOTE, errmsg);
15820Sstevel@tonic-gate 	else if (level & SL_CONSOLE)
15830Sstevel@tonic-gate 		(*notify_error)(CE_CONT, errmsg);
15840Sstevel@tonic-gate }
15850Sstevel@tonic-gate 
15860Sstevel@tonic-gate /*
15870Sstevel@tonic-gate  * It provides the default basic intr_ops interface for the new DDI
15880Sstevel@tonic-gate  * interrupt framework if the PSM doesn't have one.
15890Sstevel@tonic-gate  *
15900Sstevel@tonic-gate  * Input:
15910Sstevel@tonic-gate  * dip     - pointer to the dev_info structure of the requested device
15920Sstevel@tonic-gate  * hdlp    - pointer to the internal interrupt handle structure for the
15930Sstevel@tonic-gate  *	     requested interrupt
15940Sstevel@tonic-gate  * intr_op - opcode for this call
15950Sstevel@tonic-gate  * result  - pointer to the integer that will hold the result to be
15960Sstevel@tonic-gate  *	     passed back if return value is PSM_SUCCESS
15970Sstevel@tonic-gate  *
15980Sstevel@tonic-gate  * Output:
15990Sstevel@tonic-gate  * return value is either PSM_SUCCESS or PSM_FAILURE
16000Sstevel@tonic-gate  */
16010Sstevel@tonic-gate static int
16020Sstevel@tonic-gate mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
16030Sstevel@tonic-gate     psm_intr_op_t intr_op, int *result)
16040Sstevel@tonic-gate {
16050Sstevel@tonic-gate 	struct intrspec *ispec;
16060Sstevel@tonic-gate 
16070Sstevel@tonic-gate 	switch (intr_op) {
16080Sstevel@tonic-gate 	case PSM_INTR_OP_CHECK_MSI:
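		/*
		 * Stripping the MSI/MSI-X bits from the requested type(s)
		 * reports that this default PSM supports neither MSI nor
		 * MSI-X; only the remaining types (e.g. FIXED) come back
		 * as usable.
		 */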
16090Sstevel@tonic-gate 		*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
16104481Sbholler 		    DDI_INTR_TYPE_MSIX);
16110Sstevel@tonic-gate 		break;
16120Sstevel@tonic-gate 	case PSM_INTR_OP_ALLOC_VECTORS:
16130Sstevel@tonic-gate 		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
16140Sstevel@tonic-gate 			*result = 1;
16150Sstevel@tonic-gate 		else
16160Sstevel@tonic-gate 			*result = 0;
16170Sstevel@tonic-gate 		break;
16180Sstevel@tonic-gate 	case PSM_INTR_OP_FREE_VECTORS:
16190Sstevel@tonic-gate 		break;
16200Sstevel@tonic-gate 	case PSM_INTR_OP_NAVAIL_VECTORS:
16210Sstevel@tonic-gate 		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
16220Sstevel@tonic-gate 			*result = 1;
16230Sstevel@tonic-gate 		else
16240Sstevel@tonic-gate 			*result = 0;
16250Sstevel@tonic-gate 		break;
16260Sstevel@tonic-gate 	case PSM_INTR_OP_XLATE_VECTOR:
1627916Sschwartz 		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
16280Sstevel@tonic-gate 		*result = psm_translate_irq(dip, ispec->intrspec_vec);
16290Sstevel@tonic-gate 		break;
16300Sstevel@tonic-gate 	case PSM_INTR_OP_GET_CAP:
16310Sstevel@tonic-gate 		*result = 0;
16320Sstevel@tonic-gate 		break;
16330Sstevel@tonic-gate 	case PSM_INTR_OP_GET_PENDING:
16340Sstevel@tonic-gate 	case PSM_INTR_OP_CLEAR_MASK:
16350Sstevel@tonic-gate 	case PSM_INTR_OP_SET_MASK:
16360Sstevel@tonic-gate 	case PSM_INTR_OP_GET_SHARED:
16370Sstevel@tonic-gate 	case PSM_INTR_OP_SET_PRI:
16380Sstevel@tonic-gate 	case PSM_INTR_OP_SET_CAP:
1639916Sschwartz 	case PSM_INTR_OP_SET_CPU:
1640916Sschwartz 	case PSM_INTR_OP_GET_INTR:
16410Sstevel@tonic-gate 	default:
16420Sstevel@tonic-gate 		return (PSM_FAILURE);
16430Sstevel@tonic-gate 	}
16440Sstevel@tonic-gate 	return (PSM_SUCCESS);
16450Sstevel@tonic-gate }
16464769Sdp78419 /*
16474769Sdp78419  * Return 1 if CMT load balancing policies should be
16484769Sdp78419  * implemented across instances of the specified hardware
16494769Sdp78419  * sharing relationship.
16504769Sdp78419  */
16514769Sdp78419 int
16524769Sdp78419 pg_cmt_load_bal_hw(pghw_type_t hw)
16534769Sdp78419 {
16544769Sdp78419 	if (hw == PGHW_IPIPE ||
16554769Sdp78419 	    hw == PGHW_FPU ||
16564769Sdp78419 	    hw == PGHW_CHIP)
16574769Sdp78419 		return (1);
16584769Sdp78419 	else
16594769Sdp78419 		return (0);
16604769Sdp78419 }
16614769Sdp78419 /*
16624769Sdp78419  * Return 1 if thread affinity policies should be implemented
16634769Sdp78419  * for instances of the specified hardware sharing relationship.
16644769Sdp78419  */
16654769Sdp78419 int
16664769Sdp78419 pg_cmt_affinity_hw(pghw_type_t hw)
16674769Sdp78419 {
16684769Sdp78419 	if (hw == PGHW_CACHE)
16694769Sdp78419 		return (1);
16704769Sdp78419 	else
16714769Sdp78419 		return (0);
16724769Sdp78419 }
1673