18906SEric.Saxe@Sun.COM /*
28906SEric.Saxe@Sun.COM * CDDL HEADER START
38906SEric.Saxe@Sun.COM *
48906SEric.Saxe@Sun.COM * The contents of this file are subject to the terms of the
58906SEric.Saxe@Sun.COM * Common Development and Distribution License (the "License").
68906SEric.Saxe@Sun.COM * You may not use this file except in compliance with the License.
78906SEric.Saxe@Sun.COM *
88906SEric.Saxe@Sun.COM * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
98906SEric.Saxe@Sun.COM * or http://www.opensolaris.org/os/licensing.
108906SEric.Saxe@Sun.COM * See the License for the specific language governing permissions
118906SEric.Saxe@Sun.COM * and limitations under the License.
128906SEric.Saxe@Sun.COM *
138906SEric.Saxe@Sun.COM * When distributing Covered Code, include this CDDL HEADER in each
148906SEric.Saxe@Sun.COM * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
158906SEric.Saxe@Sun.COM * If applicable, add the following below this CDDL HEADER, with the
168906SEric.Saxe@Sun.COM * fields enclosed by brackets "[]" replaced with your own identifying
178906SEric.Saxe@Sun.COM * information: Portions Copyright [yyyy] [name of copyright owner]
188906SEric.Saxe@Sun.COM *
198906SEric.Saxe@Sun.COM * CDDL HEADER END
208906SEric.Saxe@Sun.COM */
218906SEric.Saxe@Sun.COM /*
228906SEric.Saxe@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
238906SEric.Saxe@Sun.COM * Use is subject to license terms.
248906SEric.Saxe@Sun.COM */
259283SBill.Holler@Sun.COM /*
26*12004Sjiang.liu@intel.com * Copyright (c) 2009-2010, Intel Corporation.
279283SBill.Holler@Sun.COM * All rights reserved.
289283SBill.Holler@Sun.COM */
298906SEric.Saxe@Sun.COM
308906SEric.Saxe@Sun.COM #include <sys/x86_archext.h>
318906SEric.Saxe@Sun.COM #include <sys/machsystm.h>
328906SEric.Saxe@Sun.COM #include <sys/x_call.h>
338906SEric.Saxe@Sun.COM #include <sys/stat.h>
348906SEric.Saxe@Sun.COM #include <sys/acpi/acpi.h>
358906SEric.Saxe@Sun.COM #include <sys/acpica.h>
368906SEric.Saxe@Sun.COM #include <sys/cpu_acpi.h>
378906SEric.Saxe@Sun.COM #include <sys/cpu_idle.h>
388906SEric.Saxe@Sun.COM #include <sys/cpupm.h>
399637SRandy.Fishel@Sun.COM #include <sys/cpu_event.h>
408906SEric.Saxe@Sun.COM #include <sys/hpet.h>
418906SEric.Saxe@Sun.COM #include <sys/archsystm.h>
428906SEric.Saxe@Sun.COM #include <vm/hat_i86.h>
438906SEric.Saxe@Sun.COM #include <sys/dtrace.h>
448906SEric.Saxe@Sun.COM #include <sys/sdt.h>
458906SEric.Saxe@Sun.COM #include <sys/callb.h>
468906SEric.Saxe@Sun.COM
479283SBill.Holler@Sun.COM #define CSTATE_USING_HPET 1
489283SBill.Holler@Sun.COM #define CSTATE_USING_LAT 2
499283SBill.Holler@Sun.COM
50*12004Sjiang.liu@intel.com #define CPU_IDLE_STOP_TIMEOUT 1000
51*12004Sjiang.liu@intel.com
528906SEric.Saxe@Sun.COM extern void cpu_idle_adaptive(void);
538983SBill.Holler@Sun.COM extern uint32_t cpupm_next_cstate(cma_c_state_t *cs_data,
548983SBill.Holler@Sun.COM cpu_acpi_cstate_t *cstates, uint32_t cs_count, hrtime_t start);
558906SEric.Saxe@Sun.COM
568906SEric.Saxe@Sun.COM static int cpu_idle_init(cpu_t *);
578906SEric.Saxe@Sun.COM static void cpu_idle_fini(cpu_t *);
5810488SMark.Haywood@Sun.COM static void cpu_idle_stop(cpu_t *);
598906SEric.Saxe@Sun.COM static boolean_t cpu_deep_idle_callb(void *arg, int code);
608906SEric.Saxe@Sun.COM static boolean_t cpu_idle_cpr_callb(void *arg, int code);
618906SEric.Saxe@Sun.COM static void acpi_cpu_cstate(cpu_acpi_cstate_t *cstate);
628906SEric.Saxe@Sun.COM
639283SBill.Holler@Sun.COM static boolean_t cstate_use_timer(hrtime_t *lapic_expire, int timer);
649283SBill.Holler@Sun.COM
659283SBill.Holler@Sun.COM /*
669283SBill.Holler@Sun.COM * the flag of always-running local APIC timer.
679283SBill.Holler@Sun.COM * the flag of HPET Timer use in deep cstate.
689283SBill.Holler@Sun.COM */
699283SBill.Holler@Sun.COM static boolean_t cpu_cstate_arat = B_FALSE;
709283SBill.Holler@Sun.COM static boolean_t cpu_cstate_hpet = B_FALSE;
719283SBill.Holler@Sun.COM
728906SEric.Saxe@Sun.COM /*
738906SEric.Saxe@Sun.COM * Interfaces for modules implementing Intel's deep c-state.
748906SEric.Saxe@Sun.COM */
758906SEric.Saxe@Sun.COM cpupm_state_ops_t cpu_idle_ops = {
768906SEric.Saxe@Sun.COM "Generic ACPI C-state Support",
778906SEric.Saxe@Sun.COM cpu_idle_init,
788906SEric.Saxe@Sun.COM cpu_idle_fini,
7910488SMark.Haywood@Sun.COM NULL,
8010488SMark.Haywood@Sun.COM cpu_idle_stop
818906SEric.Saxe@Sun.COM };
828906SEric.Saxe@Sun.COM
838906SEric.Saxe@Sun.COM static kmutex_t cpu_idle_callb_mutex;
848906SEric.Saxe@Sun.COM static callb_id_t cpu_deep_idle_callb_id;
858906SEric.Saxe@Sun.COM static callb_id_t cpu_idle_cpr_callb_id;
868906SEric.Saxe@Sun.COM static uint_t cpu_idle_cfg_state;
878906SEric.Saxe@Sun.COM
888906SEric.Saxe@Sun.COM static kmutex_t cpu_idle_mutex;
898906SEric.Saxe@Sun.COM
908906SEric.Saxe@Sun.COM cpu_idle_kstat_t cpu_idle_kstat = {
918906SEric.Saxe@Sun.COM { "address_space_id", KSTAT_DATA_STRING },
928906SEric.Saxe@Sun.COM { "latency", KSTAT_DATA_UINT32 },
938906SEric.Saxe@Sun.COM { "power", KSTAT_DATA_UINT32 },
948906SEric.Saxe@Sun.COM };
958906SEric.Saxe@Sun.COM
968906SEric.Saxe@Sun.COM /*
978906SEric.Saxe@Sun.COM * kstat update function of the c-state info
988906SEric.Saxe@Sun.COM */
998906SEric.Saxe@Sun.COM static int
cpu_idle_kstat_update(kstat_t * ksp,int flag)1008906SEric.Saxe@Sun.COM cpu_idle_kstat_update(kstat_t *ksp, int flag)
1018906SEric.Saxe@Sun.COM {
1028906SEric.Saxe@Sun.COM cpu_acpi_cstate_t *cstate = ksp->ks_private;
1038906SEric.Saxe@Sun.COM
1048906SEric.Saxe@Sun.COM if (flag == KSTAT_WRITE) {
1058906SEric.Saxe@Sun.COM return (EACCES);
1068906SEric.Saxe@Sun.COM }
1078906SEric.Saxe@Sun.COM
1088906SEric.Saxe@Sun.COM if (cstate->cs_addrspace_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
1098906SEric.Saxe@Sun.COM kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
1108906SEric.Saxe@Sun.COM "FFixedHW");
1118906SEric.Saxe@Sun.COM } else if (cstate->cs_addrspace_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1128906SEric.Saxe@Sun.COM kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
1138906SEric.Saxe@Sun.COM "SystemIO");
1148906SEric.Saxe@Sun.COM } else {
1158906SEric.Saxe@Sun.COM kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
1168906SEric.Saxe@Sun.COM "Unsupported");
1178906SEric.Saxe@Sun.COM }
1188906SEric.Saxe@Sun.COM
1198906SEric.Saxe@Sun.COM cpu_idle_kstat.cs_latency.value.ui32 = cstate->cs_latency;
1208906SEric.Saxe@Sun.COM cpu_idle_kstat.cs_power.value.ui32 = cstate->cs_power;
1218906SEric.Saxe@Sun.COM
1228906SEric.Saxe@Sun.COM return (0);
1238906SEric.Saxe@Sun.COM }
1248906SEric.Saxe@Sun.COM
1258906SEric.Saxe@Sun.COM /*
1269283SBill.Holler@Sun.COM * Used during configuration callbacks to manage implementation specific
1279283SBill.Holler@Sun.COM * details of the hardware timer used during Deep C-state.
1289283SBill.Holler@Sun.COM */
1299283SBill.Holler@Sun.COM boolean_t
cstate_timer_callback(int code)1309283SBill.Holler@Sun.COM cstate_timer_callback(int code)
1319283SBill.Holler@Sun.COM {
1329283SBill.Holler@Sun.COM if (cpu_cstate_arat) {
1339283SBill.Holler@Sun.COM return (B_TRUE);
1349283SBill.Holler@Sun.COM } else if (cpu_cstate_hpet) {
1359283SBill.Holler@Sun.COM return (hpet.callback(code));
1369283SBill.Holler@Sun.COM }
1379283SBill.Holler@Sun.COM return (B_FALSE);
1389283SBill.Holler@Sun.COM }
1399283SBill.Holler@Sun.COM
1409283SBill.Holler@Sun.COM /*
1419283SBill.Holler@Sun.COM * Some Local APIC Timers do not work during Deep C-states.
1429283SBill.Holler@Sun.COM * The Deep C-state idle function uses this function to ensure it is using a
1439283SBill.Holler@Sun.COM * hardware timer that works during Deep C-states. This function also
1449283SBill.Holler@Sun.COM * switches the timer back to the LACPI Timer after Deep C-state.
1459283SBill.Holler@Sun.COM */
1469283SBill.Holler@Sun.COM static boolean_t
cstate_use_timer(hrtime_t * lapic_expire,int timer)1479283SBill.Holler@Sun.COM cstate_use_timer(hrtime_t *lapic_expire, int timer)
1489283SBill.Holler@Sun.COM {
1499283SBill.Holler@Sun.COM if (cpu_cstate_arat)
1509283SBill.Holler@Sun.COM return (B_TRUE);
1519283SBill.Holler@Sun.COM
1529283SBill.Holler@Sun.COM /*
1539283SBill.Holler@Sun.COM * We have to return B_FALSE if no arat or hpet support
1549283SBill.Holler@Sun.COM */
1559283SBill.Holler@Sun.COM if (!cpu_cstate_hpet)
1569283SBill.Holler@Sun.COM return (B_FALSE);
1579283SBill.Holler@Sun.COM
1589283SBill.Holler@Sun.COM switch (timer) {
1599283SBill.Holler@Sun.COM case CSTATE_USING_HPET:
1609283SBill.Holler@Sun.COM return (hpet.use_hpet_timer(lapic_expire));
1619283SBill.Holler@Sun.COM case CSTATE_USING_LAT:
1629283SBill.Holler@Sun.COM hpet.use_lapic_timer(*lapic_expire);
1639283SBill.Holler@Sun.COM return (B_TRUE);
1649283SBill.Holler@Sun.COM default:
1659283SBill.Holler@Sun.COM return (B_FALSE);
1669283SBill.Holler@Sun.COM }
1679283SBill.Holler@Sun.COM }
1689283SBill.Holler@Sun.COM
1699283SBill.Holler@Sun.COM /*
1708906SEric.Saxe@Sun.COM * c-state wakeup function.
1718906SEric.Saxe@Sun.COM * Similar to cpu_wakeup and cpu_wakeup_mwait except this function deals
1728906SEric.Saxe@Sun.COM * with CPUs asleep in MWAIT, HLT, or ACPI Deep C-State.
1738906SEric.Saxe@Sun.COM */
void
cstate_wakeup(cpu_t *cp, int bound)
{
	struct machcpu *mcpu = &(cp->cpu_m);
	volatile uint32_t *mcpu_mwait = mcpu->mcpu_mwait;
	cpupart_t *cpu_part;
	uint_t cpu_found;
	processorid_t cpu_sid;

	cpu_part = cp->cpu_part;
	cpu_sid = cp->cpu_seqid;
	/*
	 * Clear the halted bit for that CPU since it will be woken up
	 * in a moment.
	 */
	if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid);

		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cp != CPU) {
			/*
			 * Use correct wakeup mechanism: a store to the
			 * monitored mcpu_mwait word wakes an MWAIT-halted
			 * CPU; otherwise fall back to an IPI via poke_cpu().
			 */
			if ((mcpu_mwait != NULL) &&
			    (*mcpu_mwait == MWAIT_HALTED))
				MWAIT_WAKEUP(cp);
			else
				poke_cpu(cp->cpu_id);
		}
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;


	/*
	 * See if there's any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		cpu_found = bitset_find(&cpu_part->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;

	} while (bitset_atomic_test_and_del(&cpu_part->cp_haltset,
	    cpu_found) < 0);

	/*
	 * Must use correct wakeup mechanism to avoid lost wakeup of
	 * alternate cpu.
	 */
	if (cpu_found != CPU->cpu_seqid) {
		/* Re-fetch the mwait word for the CPU we actually chose. */
		mcpu_mwait = cpu_seq[cpu_found]->cpu_m.mcpu_mwait;
		if ((mcpu_mwait != NULL) && (*mcpu_mwait == MWAIT_HALTED))
			MWAIT_WAKEUP(cpu_seq[cpu_found]);
		else
			poke_cpu(cpu_seq[cpu_found]->cpu_id);
	}
}
2598906SEric.Saxe@Sun.COM
2608906SEric.Saxe@Sun.COM /*
2619637SRandy.Fishel@Sun.COM * Function called by CPU idle notification framework to check whether CPU
2629637SRandy.Fishel@Sun.COM * has been awakened. It will be called with interrupt disabled.
2639637SRandy.Fishel@Sun.COM * If CPU has been awakened, call cpu_idle_exit() to notify CPU idle
2649637SRandy.Fishel@Sun.COM * notification framework.
2659637SRandy.Fishel@Sun.COM */
2669637SRandy.Fishel@Sun.COM static void
acpi_cpu_mwait_check_wakeup(void * arg)2679637SRandy.Fishel@Sun.COM acpi_cpu_mwait_check_wakeup(void *arg)
2689637SRandy.Fishel@Sun.COM {
2699637SRandy.Fishel@Sun.COM volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;
2709637SRandy.Fishel@Sun.COM
2719637SRandy.Fishel@Sun.COM ASSERT(arg != NULL);
2729637SRandy.Fishel@Sun.COM if (*mcpu_mwait != MWAIT_HALTED) {
2739637SRandy.Fishel@Sun.COM /*
2749637SRandy.Fishel@Sun.COM * CPU has been awakened, notify CPU idle notification system.
2759637SRandy.Fishel@Sun.COM */
2769637SRandy.Fishel@Sun.COM cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
2779637SRandy.Fishel@Sun.COM } else {
2789637SRandy.Fishel@Sun.COM /*
2799637SRandy.Fishel@Sun.COM * Toggle interrupt flag to detect pending interrupts.
2809637SRandy.Fishel@Sun.COM * If interrupt happened, do_interrupt() will notify CPU idle
2819637SRandy.Fishel@Sun.COM * notification framework so no need to call cpu_idle_exit()
2829637SRandy.Fishel@Sun.COM * here.
2839637SRandy.Fishel@Sun.COM */
2849637SRandy.Fishel@Sun.COM sti();
2859637SRandy.Fishel@Sun.COM SMT_PAUSE();
2869637SRandy.Fishel@Sun.COM cli();
2879637SRandy.Fishel@Sun.COM }
2889637SRandy.Fishel@Sun.COM }
2899637SRandy.Fishel@Sun.COM
2909637SRandy.Fishel@Sun.COM static void
acpi_cpu_mwait_ipi_check_wakeup(void * arg)2919637SRandy.Fishel@Sun.COM acpi_cpu_mwait_ipi_check_wakeup(void *arg)
2929637SRandy.Fishel@Sun.COM {
2939637SRandy.Fishel@Sun.COM volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;
2949637SRandy.Fishel@Sun.COM
2959637SRandy.Fishel@Sun.COM ASSERT(arg != NULL);
2969637SRandy.Fishel@Sun.COM if (*mcpu_mwait != MWAIT_WAKEUP_IPI) {
2979637SRandy.Fishel@Sun.COM /*
2989637SRandy.Fishel@Sun.COM * CPU has been awakened, notify CPU idle notification system.
2999637SRandy.Fishel@Sun.COM */
3009637SRandy.Fishel@Sun.COM cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
3019637SRandy.Fishel@Sun.COM } else {
3029637SRandy.Fishel@Sun.COM /*
3039637SRandy.Fishel@Sun.COM * Toggle interrupt flag to detect pending interrupts.
3049637SRandy.Fishel@Sun.COM * If interrupt happened, do_interrupt() will notify CPU idle
3059637SRandy.Fishel@Sun.COM * notification framework so no need to call cpu_idle_exit()
3069637SRandy.Fishel@Sun.COM * here.
3079637SRandy.Fishel@Sun.COM */
3089637SRandy.Fishel@Sun.COM sti();
3099637SRandy.Fishel@Sun.COM SMT_PAUSE();
3109637SRandy.Fishel@Sun.COM cli();
3119637SRandy.Fishel@Sun.COM }
3129637SRandy.Fishel@Sun.COM }
3139637SRandy.Fishel@Sun.COM
/*
 * Generic wakeup-check callback used when no mwait word is available.
 * Called with interrupts disabled.
 */
/*ARGSUSED*/
static void
acpi_cpu_check_wakeup(void *arg)
{
	/*
	 * Toggle interrupt flag to detect pending interrupts.
	 * If interrupt happened, do_interrupt() will notify CPU idle
	 * notification framework so no need to call cpu_idle_exit() here.
	 */
	sti();
	SMT_PAUSE();
	cli();
}
3279637SRandy.Fishel@Sun.COM
3289637SRandy.Fishel@Sun.COM /*
3298906SEric.Saxe@Sun.COM * enter deep c-state handler
3308906SEric.Saxe@Sun.COM */
/*
 * Enter the deep C-state described by "cstate" on the current CPU.
 *
 * The sequence is order-critical: (1) publish our wakeup token in
 * mcpu_mwait, (2) add ourselves to the partition haltset, (3) bail if
 * work appeared, (4) disable interrupts and arrange a deep-C-safe timer,
 * (5) re-check the haltset and run queue, then finally arm MONITOR and
 * halt via MWAIT or an ACPI I/O-port read.  On every exit path the LAPIC
 * timer is restored and interrupts re-enabled before the haltset bit and
 * CPU_DISP_HALTED flag are cleared.
 */
static void
acpi_cpu_cstate(cpu_acpi_cstate_t *cstate)
{
	volatile uint32_t *mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	hrtime_t lapic_expire;
	uint8_t type = cstate->cs_addrspace_id;
	uint32_t cs_type = cstate->cs_type;
	int hset_update = 1;
	boolean_t using_timer;
	cpu_idle_check_wakeup_t check_func = &acpi_cpu_check_wakeup;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait. No other cpu will
	 * attempt to set our mcpu_mwait until we add ourself to the haltset.
	 *
	 * I/O-port cstates are exited by IPI, so they use the
	 * MWAIT_WAKEUP_IPI token; FFH (MWAIT) cstates use MWAIT_HALTED.
	 * The matching check function is handed to cpu_idle_enter() below.
	 */
	if (mcpu_mwait) {
		if (type == ACPI_ADR_SPACE_SYSTEM_IO) {
			*mcpu_mwait = MWAIT_WAKEUP_IPI;
			check_func = &acpi_cpu_mwait_ipi_check_wakeup;
		} else {
			*mcpu_mwait = MWAIT_HALTED;
			check_func = &acpi_cpu_mwait_check_wakeup;
		}
	}

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awakened. We therefore need to first
	 * add ourselves to the halted cpuset, and and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a write to our mcpu_mwait.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * The local APIC timer can stop in ACPI C2 and deeper c-states.
	 * Try to program the HPET hardware to substitute for this CPU's
	 * LAPIC timer.
	 * cstate_use_timer() could disable the LAPIC Timer. Make sure
	 * to start the LAPIC Timer again before leaving this function.
	 *
	 * Disable interrupts here so we will awaken immediately after halting
	 * if someone tries to poke us between now and the time we actually
	 * halt.
	 */
	cli();
	using_timer = cstate_use_timer(&lapic_expire, CSTATE_USING_HPET);

	/*
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the cstate_wakeup() will pop us out of the halted
	 * state.
	 *
	 * This means that the ordering of the cstate_wakeup() and the clearing
	 * of the bit by cpu_wakeup is important.
	 * cpu_wakeup() must clear our mc_haltset bit, and then call
	 * cstate_wakeup().
	 * acpi_cpu_cstate() must disable interrupts, then check for the bit.
	 */
	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	if (using_timer == B_FALSE) {

		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();

		/*
		 * We are currently unable to program the HPET to act as this
		 * CPU's proxy LAPIC timer. This CPU cannot enter C2 or deeper
		 * because no timer is set to wake it up while its LAPIC timer
		 * stalls in deep C-States.
		 * Enter C1 instead.
		 *
		 * cstate_wake_cpu() will wake this CPU with an IPI which
		 * works with MWAIT.
		 */
		i86_monitor(mcpu_mwait, 0, 0);
		/* Mask off the IPI bit: either wakeup token aborts the halt. */
		if ((*mcpu_mwait & ~MWAIT_WAKEUP_IPI) == MWAIT_HALTED) {
			if (cpu_idle_enter(IDLE_STATE_C1, 0,
			    check_func, (void *)mcpu_mwait) == 0) {
				if ((*mcpu_mwait & ~MWAIT_WAKEUP_IPI) ==
				    MWAIT_HALTED) {
					i86_mwait(0, 0);
				}
				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
			}
		}

		/*
		 * We're no longer halted
		 */
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	if (type == ACPI_ADR_SPACE_FIXED_HARDWARE) {
		/*
		 * We're on our way to being halted.
		 * To avoid a lost wakeup, arm the monitor before checking
		 * if another cpu wrote to mcpu_mwait to wake us up.
		 */
		i86_monitor(mcpu_mwait, 0, 0);
		if (*mcpu_mwait == MWAIT_HALTED) {
			if (cpu_idle_enter((uint_t)cs_type, 0,
			    check_func, (void *)mcpu_mwait) == 0) {
				if (*mcpu_mwait == MWAIT_HALTED) {
					i86_mwait(cstate->cs_address, 1);
				}
				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
			}
		}
	} else if (type == ACPI_ADR_SPACE_SYSTEM_IO) {
		uint32_t value;
		ACPI_TABLE_FADT *gbl_FADT;

		if (*mcpu_mwait == MWAIT_WAKEUP_IPI) {
			if (cpu_idle_enter((uint_t)cs_type, 0,
			    check_func, (void *)mcpu_mwait) == 0) {
				if (*mcpu_mwait == MWAIT_WAKEUP_IPI) {
					/*
					 * Reading the cstate port enters the
					 * C-state; the PM timer read that
					 * follows is the ACPI-mandated way to
					 * ensure entry completed.
					 */
					(void) cpu_acpi_read_port(
					    cstate->cs_address, &value, 8);
					acpica_get_global_FADT(&gbl_FADT);
					(void) cpu_acpi_read_port(
					    gbl_FADT->XPmTimerBlock.Address,
					    &value, 32);
				}
				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
			}
		}
	}

	/*
	 * The LAPIC timer may have stopped in deep c-state.
	 * Reprogram this CPU's LAPIC here before enabling interrupts.
	 */
	(void) cstate_use_timer(&lapic_expire, CSTATE_USING_LAT);
	sti();

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
5488906SEric.Saxe@Sun.COM
5498906SEric.Saxe@Sun.COM /*
5508906SEric.Saxe@Sun.COM * Idle the present CPU, deep c-state is supported
5518906SEric.Saxe@Sun.COM */
5528906SEric.Saxe@Sun.COM void
cpu_acpi_idle(void)5538906SEric.Saxe@Sun.COM cpu_acpi_idle(void)
5548906SEric.Saxe@Sun.COM {
5558906SEric.Saxe@Sun.COM cpu_t *cp = CPU;
5568906SEric.Saxe@Sun.COM cpu_acpi_handle_t handle;
5578906SEric.Saxe@Sun.COM cma_c_state_t *cs_data;
5588983SBill.Holler@Sun.COM cpu_acpi_cstate_t *cstates;
5598906SEric.Saxe@Sun.COM hrtime_t start, end;
5608906SEric.Saxe@Sun.COM int cpu_max_cstates;
5618983SBill.Holler@Sun.COM uint32_t cs_indx;
5628983SBill.Holler@Sun.COM uint16_t cs_type;
5638906SEric.Saxe@Sun.COM
5648906SEric.Saxe@Sun.COM cpupm_mach_state_t *mach_state =
5658906SEric.Saxe@Sun.COM (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
5668906SEric.Saxe@Sun.COM handle = mach_state->ms_acpi_handle;
5678906SEric.Saxe@Sun.COM ASSERT(CPU_ACPI_CSTATES(handle) != NULL);
5688906SEric.Saxe@Sun.COM
5698906SEric.Saxe@Sun.COM cs_data = mach_state->ms_cstate.cma_state.cstate;
5708983SBill.Holler@Sun.COM cstates = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
5718983SBill.Holler@Sun.COM ASSERT(cstates != NULL);
5728906SEric.Saxe@Sun.COM cpu_max_cstates = cpu_acpi_get_max_cstates(handle);
5738906SEric.Saxe@Sun.COM if (cpu_max_cstates > CPU_MAX_CSTATES)
5748906SEric.Saxe@Sun.COM cpu_max_cstates = CPU_MAX_CSTATES;
5758983SBill.Holler@Sun.COM if (cpu_max_cstates == 1) { /* no ACPI c-state data */
5768983SBill.Holler@Sun.COM (*non_deep_idle_cpu)();
5778983SBill.Holler@Sun.COM return;
5788983SBill.Holler@Sun.COM }
5798906SEric.Saxe@Sun.COM
5808906SEric.Saxe@Sun.COM start = gethrtime_unscaled();
5818906SEric.Saxe@Sun.COM
5828983SBill.Holler@Sun.COM cs_indx = cpupm_next_cstate(cs_data, cstates, cpu_max_cstates, start);
5838906SEric.Saxe@Sun.COM
5848983SBill.Holler@Sun.COM cs_type = cstates[cs_indx].cs_type;
5858906SEric.Saxe@Sun.COM
5868906SEric.Saxe@Sun.COM switch (cs_type) {
5878906SEric.Saxe@Sun.COM default:
5888906SEric.Saxe@Sun.COM /* FALLTHROUGH */
5898906SEric.Saxe@Sun.COM case CPU_ACPI_C1:
5908906SEric.Saxe@Sun.COM (*non_deep_idle_cpu)();
5918906SEric.Saxe@Sun.COM break;
5928906SEric.Saxe@Sun.COM
5938906SEric.Saxe@Sun.COM case CPU_ACPI_C2:
5948983SBill.Holler@Sun.COM acpi_cpu_cstate(&cstates[cs_indx]);
5958906SEric.Saxe@Sun.COM break;
5968906SEric.Saxe@Sun.COM
5978906SEric.Saxe@Sun.COM case CPU_ACPI_C3:
5988906SEric.Saxe@Sun.COM /*
59910447SBill.Holler@Sun.COM * All supported Intel processors maintain cache coherency
60010447SBill.Holler@Sun.COM * during C3. Currently when entering C3 processors flush
60110447SBill.Holler@Sun.COM * core caches to higher level shared cache. The shared cache
60210447SBill.Holler@Sun.COM * maintains state and supports probes during C3.
60310447SBill.Holler@Sun.COM * Consequently there is no need to handle cache coherency
60410447SBill.Holler@Sun.COM * and Bus Master activity here with the cache flush, BM_RLD
60510447SBill.Holler@Sun.COM * bit, BM_STS bit, nor PM2_CNT.ARB_DIS mechanisms described
60610447SBill.Holler@Sun.COM * in section 8.1.4 of the ACPI Specification 4.0.
6078906SEric.Saxe@Sun.COM */
6088983SBill.Holler@Sun.COM acpi_cpu_cstate(&cstates[cs_indx]);
6098906SEric.Saxe@Sun.COM break;
6108906SEric.Saxe@Sun.COM }
6118906SEric.Saxe@Sun.COM
6128906SEric.Saxe@Sun.COM end = gethrtime_unscaled();
6138906SEric.Saxe@Sun.COM
6148906SEric.Saxe@Sun.COM /*
6158906SEric.Saxe@Sun.COM * Update statistics
6168906SEric.Saxe@Sun.COM */
6178906SEric.Saxe@Sun.COM cpupm_wakeup_cstate_data(cs_data, end);
6188906SEric.Saxe@Sun.COM }
6198906SEric.Saxe@Sun.COM
6208906SEric.Saxe@Sun.COM boolean_t
cpu_deep_cstates_supported(void)6218906SEric.Saxe@Sun.COM cpu_deep_cstates_supported(void)
6228906SEric.Saxe@Sun.COM {
6238906SEric.Saxe@Sun.COM extern int idle_cpu_no_deep_c;
6248906SEric.Saxe@Sun.COM
6258906SEric.Saxe@Sun.COM if (idle_cpu_no_deep_c)
6268906SEric.Saxe@Sun.COM return (B_FALSE);
6278906SEric.Saxe@Sun.COM
6288906SEric.Saxe@Sun.COM if (!cpuid_deep_cstates_supported())
6298906SEric.Saxe@Sun.COM return (B_FALSE);
6308906SEric.Saxe@Sun.COM
6319283SBill.Holler@Sun.COM if (cpuid_arat_supported()) {
6329283SBill.Holler@Sun.COM cpu_cstate_arat = B_TRUE;
6339283SBill.Holler@Sun.COM return (B_TRUE);
6349283SBill.Holler@Sun.COM }
6358906SEric.Saxe@Sun.COM
6369283SBill.Holler@Sun.COM if ((hpet.supported == HPET_FULL_SUPPORT) &&
6379283SBill.Holler@Sun.COM hpet.install_proxy()) {
6389283SBill.Holler@Sun.COM cpu_cstate_hpet = B_TRUE;
6399283SBill.Holler@Sun.COM return (B_TRUE);
6409283SBill.Holler@Sun.COM }
6419283SBill.Holler@Sun.COM
6429283SBill.Holler@Sun.COM return (B_FALSE);
6438906SEric.Saxe@Sun.COM }
6448906SEric.Saxe@Sun.COM
6458906SEric.Saxe@Sun.COM /*
6468906SEric.Saxe@Sun.COM * Validate that this processor supports deep cstate and if so,
6478906SEric.Saxe@Sun.COM * get the c-state data from ACPI and cache it.
6488906SEric.Saxe@Sun.COM */
6498906SEric.Saxe@Sun.COM static int
cpu_idle_init(cpu_t * cp)6508906SEric.Saxe@Sun.COM cpu_idle_init(cpu_t *cp)
6518906SEric.Saxe@Sun.COM {
6528906SEric.Saxe@Sun.COM cpupm_mach_state_t *mach_state =
6538906SEric.Saxe@Sun.COM (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
6548906SEric.Saxe@Sun.COM cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
6558906SEric.Saxe@Sun.COM cpu_acpi_cstate_t *cstate;
6568906SEric.Saxe@Sun.COM char name[KSTAT_STRLEN];
6578906SEric.Saxe@Sun.COM int cpu_max_cstates, i;
65810075SMark.Haywood@Sun.COM int ret;
6598906SEric.Saxe@Sun.COM
6608906SEric.Saxe@Sun.COM /*
6618906SEric.Saxe@Sun.COM * Cache the C-state specific ACPI data.
6628906SEric.Saxe@Sun.COM */
66310075SMark.Haywood@Sun.COM if ((ret = cpu_acpi_cache_cstate_data(handle)) != 0) {
66410075SMark.Haywood@Sun.COM if (ret < 0)
66510075SMark.Haywood@Sun.COM cmn_err(CE_NOTE,
66610075SMark.Haywood@Sun.COM "!Support for CPU deep idle states is being "
66710075SMark.Haywood@Sun.COM "disabled due to errors parsing ACPI C-state "
66810075SMark.Haywood@Sun.COM "objects exported by BIOS.");
6698906SEric.Saxe@Sun.COM cpu_idle_fini(cp);
6708906SEric.Saxe@Sun.COM return (-1);
6718906SEric.Saxe@Sun.COM }
6728906SEric.Saxe@Sun.COM
6738906SEric.Saxe@Sun.COM cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
6748906SEric.Saxe@Sun.COM
6758906SEric.Saxe@Sun.COM cpu_max_cstates = cpu_acpi_get_max_cstates(handle);
6768906SEric.Saxe@Sun.COM
6778906SEric.Saxe@Sun.COM for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
6788906SEric.Saxe@Sun.COM (void) snprintf(name, KSTAT_STRLEN - 1, "c%d", cstate->cs_type);
6798906SEric.Saxe@Sun.COM /*
6808906SEric.Saxe@Sun.COM * Allocate, initialize and install cstate kstat
6818906SEric.Saxe@Sun.COM */
682*12004Sjiang.liu@intel.com cstate->cs_ksp = kstat_create("cstate", cp->cpu_id,
6838906SEric.Saxe@Sun.COM name, "misc",
6848906SEric.Saxe@Sun.COM KSTAT_TYPE_NAMED,
6858906SEric.Saxe@Sun.COM sizeof (cpu_idle_kstat) / sizeof (kstat_named_t),
6868906SEric.Saxe@Sun.COM KSTAT_FLAG_VIRTUAL);
6878906SEric.Saxe@Sun.COM
6888906SEric.Saxe@Sun.COM if (cstate->cs_ksp == NULL) {
6898906SEric.Saxe@Sun.COM cmn_err(CE_NOTE, "kstat_create(c_state) fail");
6908906SEric.Saxe@Sun.COM } else {
6918906SEric.Saxe@Sun.COM cstate->cs_ksp->ks_data = &cpu_idle_kstat;
6928906SEric.Saxe@Sun.COM cstate->cs_ksp->ks_lock = &cpu_idle_mutex;
6938906SEric.Saxe@Sun.COM cstate->cs_ksp->ks_update = cpu_idle_kstat_update;
6948906SEric.Saxe@Sun.COM cstate->cs_ksp->ks_data_size += MAXNAMELEN;
6958906SEric.Saxe@Sun.COM cstate->cs_ksp->ks_private = cstate;
6968906SEric.Saxe@Sun.COM kstat_install(cstate->cs_ksp);
6978906SEric.Saxe@Sun.COM }
698*12004Sjiang.liu@intel.com cstate++;
6998906SEric.Saxe@Sun.COM }
7008906SEric.Saxe@Sun.COM
7018906SEric.Saxe@Sun.COM cpupm_alloc_domains(cp, CPUPM_C_STATES);
7028906SEric.Saxe@Sun.COM cpupm_alloc_ms_cstate(cp);
7038906SEric.Saxe@Sun.COM
7048906SEric.Saxe@Sun.COM if (cpu_deep_cstates_supported()) {
70510447SBill.Holler@Sun.COM uint32_t value;
70610447SBill.Holler@Sun.COM
7078906SEric.Saxe@Sun.COM mutex_enter(&cpu_idle_callb_mutex);
7088906SEric.Saxe@Sun.COM if (cpu_deep_idle_callb_id == (callb_id_t)0)
7098906SEric.Saxe@Sun.COM cpu_deep_idle_callb_id = callb_add(&cpu_deep_idle_callb,
7108906SEric.Saxe@Sun.COM (void *)NULL, CB_CL_CPU_DEEP_IDLE, "cpu_deep_idle");
7118906SEric.Saxe@Sun.COM if (cpu_idle_cpr_callb_id == (callb_id_t)0)
7128906SEric.Saxe@Sun.COM cpu_idle_cpr_callb_id = callb_add(&cpu_idle_cpr_callb,
7138906SEric.Saxe@Sun.COM (void *)NULL, CB_CL_CPR_PM, "cpu_idle_cpr");
7148906SEric.Saxe@Sun.COM mutex_exit(&cpu_idle_callb_mutex);
71510447SBill.Holler@Sun.COM
71610447SBill.Holler@Sun.COM
71710447SBill.Holler@Sun.COM /*
71810447SBill.Holler@Sun.COM * All supported CPUs (Nehalem and later) will remain in C3
71910447SBill.Holler@Sun.COM * during Bus Master activity.
72010447SBill.Holler@Sun.COM * All CPUs set ACPI_BITREG_BUS_MASTER_RLD to 0 here if it
72110447SBill.Holler@Sun.COM * is not already 0 before enabling Deeper C-states.
72210447SBill.Holler@Sun.COM */
72310447SBill.Holler@Sun.COM cpu_acpi_get_register(ACPI_BITREG_BUS_MASTER_RLD, &value);
72410447SBill.Holler@Sun.COM if (value & 1)
72510447SBill.Holler@Sun.COM cpu_acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
7268906SEric.Saxe@Sun.COM }
7278906SEric.Saxe@Sun.COM
7288906SEric.Saxe@Sun.COM return (0);
7298906SEric.Saxe@Sun.COM }
7308906SEric.Saxe@Sun.COM
/*
 * Free resources allocated by cpu_idle_init().
 * Called on init failure and when C-state management is torn down;
 * must therefore tolerate partially-initialized state (e.g. no
 * cached cstate data, unregistered callbacks).
 */
static void
cpu_idle_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	uint_t cpu_max_cstates, i;

	/*
	 * idle cpu points back to the generic one
	 */
	idle_cpu = cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
	disp_enq_thread = non_deep_idle_disp_enq_thread;

	/* Delete the per-C-state kstats installed by cpu_idle_init(). */
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate) {
		cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

		for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
			if (cstate->cs_ksp != NULL)
				kstat_delete(cstate->cs_ksp);
			cstate++;
		}
	}

	/* Release per-CPU state, domain membership, and cached ACPI data. */
	cpupm_free_ms_cstate(cp);
	cpupm_free_domains(&cpupm_cstate_domains);
	cpu_acpi_free_cstate_data(handle);

	/*
	 * Unregister the callbacks; the ids double as "registered"
	 * flags, so reset them to 0 under the callback mutex.
	 */
	mutex_enter(&cpu_idle_callb_mutex);
	if (cpu_deep_idle_callb_id != (callb_id_t)0) {
		(void) callb_delete(cpu_deep_idle_callb_id);
		cpu_deep_idle_callb_id = (callb_id_t)0;
	}
	if (cpu_idle_cpr_callb_id != (callb_id_t)0) {
		(void) callb_delete(cpu_idle_cpr_callb_id);
		cpu_idle_cpr_callb_id = (callb_id_t)0;
	}
	mutex_exit(&cpu_idle_callb_mutex);
}
7758906SEric.Saxe@Sun.COM
/*
 * This function is introduced here to solve a race condition
 * between the master and the slave to touch c-state data structure.
 * After the slave calls this idle function to switch to the non
 * deep idle function, the master can go on to reclaim the resource.
 */
static void
cpu_idle_stop_sync(void)
{
	/*
	 * switch to the non deep idle function; the store below is
	 * what cpu_idle_stop()'s busy-wait loop is polling for.
	 */
	CPU->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
}
788*12004Sjiang.liu@intel.com
/*
 * Stop C-state management for a CPU that is going away: first make
 * sure the target CPU is no longer executing the deep idle path,
 * then reclaim its kstats, per-CPU state, domain membership, and
 * cached ACPI C-state data.
 */
static void
cpu_idle_stop(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	/* i serves first as the wait counter, then as the kstat index */
	uint_t cpu_max_cstates, i = 0;

	mutex_enter(&cpu_idle_callb_mutex);
	if (idle_cpu == cpu_idle_adaptive) {
		/*
		 * invoke the slave to call synchronous idle function.
		 */
		cp->cpu_m.mcpu_idle_cpu = cpu_idle_stop_sync;
		poke_cpu(cp->cpu_id);

		/*
		 * wait until the slave switchs to non deep idle function,
		 * so that the master is safe to go on to reclaim the resource.
		 */
		while (cp->cpu_m.mcpu_idle_cpu != non_deep_idle_cpu) {
			/* poll every 10us; warn each CPU_IDLE_STOP_TIMEOUT */
			drv_usecwait(10);
			if ((++i % CPU_IDLE_STOP_TIMEOUT) == 0)
				cmn_err(CE_NOTE, "!cpu_idle_stop: the slave"
				    " idle stop timeout");
		}
	}
	mutex_exit(&cpu_idle_callb_mutex);

	/* Delete the per-C-state kstats installed by cpu_idle_init(). */
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate) {
		cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

		for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
			if (cstate->cs_ksp != NULL)
				kstat_delete(cstate->cs_ksp);
			cstate++;
		}
	}
	cpupm_free_ms_cstate(cp);
	cpupm_remove_domains(cp, CPUPM_C_STATES, &cpupm_cstate_domains);
	cpu_acpi_free_cstate_data(handle);
}
83310488SMark.Haywood@Sun.COM
8348906SEric.Saxe@Sun.COM /*ARGSUSED*/
8358906SEric.Saxe@Sun.COM static boolean_t
cpu_deep_idle_callb(void * arg,int code)8368906SEric.Saxe@Sun.COM cpu_deep_idle_callb(void *arg, int code)
8378906SEric.Saxe@Sun.COM {
8388906SEric.Saxe@Sun.COM boolean_t rslt = B_TRUE;
8398906SEric.Saxe@Sun.COM
8408906SEric.Saxe@Sun.COM mutex_enter(&cpu_idle_callb_mutex);
8418906SEric.Saxe@Sun.COM switch (code) {
8428906SEric.Saxe@Sun.COM case PM_DEFAULT_CPU_DEEP_IDLE:
8438906SEric.Saxe@Sun.COM /*
8448906SEric.Saxe@Sun.COM * Default policy is same as enable
8458906SEric.Saxe@Sun.COM */
8468906SEric.Saxe@Sun.COM /*FALLTHROUGH*/
8478906SEric.Saxe@Sun.COM case PM_ENABLE_CPU_DEEP_IDLE:
8488906SEric.Saxe@Sun.COM if ((cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG) == 0)
8498906SEric.Saxe@Sun.COM break;
8508906SEric.Saxe@Sun.COM
8519283SBill.Holler@Sun.COM if (cstate_timer_callback(PM_ENABLE_CPU_DEEP_IDLE)) {
8528906SEric.Saxe@Sun.COM disp_enq_thread = cstate_wakeup;
8538906SEric.Saxe@Sun.COM idle_cpu = cpu_idle_adaptive;
8548906SEric.Saxe@Sun.COM cpu_idle_cfg_state &= ~CPU_IDLE_DEEP_CFG;
8558906SEric.Saxe@Sun.COM } else {
8568906SEric.Saxe@Sun.COM rslt = B_FALSE;
8578906SEric.Saxe@Sun.COM }
8588906SEric.Saxe@Sun.COM break;
8598906SEric.Saxe@Sun.COM
8608906SEric.Saxe@Sun.COM case PM_DISABLE_CPU_DEEP_IDLE:
8618906SEric.Saxe@Sun.COM if (cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG)
8628906SEric.Saxe@Sun.COM break;
8638906SEric.Saxe@Sun.COM
8648906SEric.Saxe@Sun.COM idle_cpu = non_deep_idle_cpu;
8659283SBill.Holler@Sun.COM if (cstate_timer_callback(PM_DISABLE_CPU_DEEP_IDLE)) {
8668906SEric.Saxe@Sun.COM disp_enq_thread = non_deep_idle_disp_enq_thread;
8678906SEric.Saxe@Sun.COM cpu_idle_cfg_state |= CPU_IDLE_DEEP_CFG;
8688906SEric.Saxe@Sun.COM }
8698906SEric.Saxe@Sun.COM break;
8708906SEric.Saxe@Sun.COM
8718906SEric.Saxe@Sun.COM default:
8728906SEric.Saxe@Sun.COM cmn_err(CE_NOTE, "!cpu deep_idle_callb: invalid code %d\n",
8738906SEric.Saxe@Sun.COM code);
8748906SEric.Saxe@Sun.COM break;
8758906SEric.Saxe@Sun.COM }
8768906SEric.Saxe@Sun.COM mutex_exit(&cpu_idle_callb_mutex);
8778906SEric.Saxe@Sun.COM return (rslt);
8788906SEric.Saxe@Sun.COM }
8798906SEric.Saxe@Sun.COM
8808906SEric.Saxe@Sun.COM /*ARGSUSED*/
8818906SEric.Saxe@Sun.COM static boolean_t
cpu_idle_cpr_callb(void * arg,int code)8828906SEric.Saxe@Sun.COM cpu_idle_cpr_callb(void *arg, int code)
8838906SEric.Saxe@Sun.COM {
8848906SEric.Saxe@Sun.COM boolean_t rslt = B_TRUE;
8858906SEric.Saxe@Sun.COM
8868906SEric.Saxe@Sun.COM mutex_enter(&cpu_idle_callb_mutex);
8878906SEric.Saxe@Sun.COM switch (code) {
8888906SEric.Saxe@Sun.COM case CB_CODE_CPR_RESUME:
8899283SBill.Holler@Sun.COM if (cstate_timer_callback(CB_CODE_CPR_RESUME)) {
8908906SEric.Saxe@Sun.COM /*
8918906SEric.Saxe@Sun.COM * Do not enable dispatcher hooks if disabled by user.
8928906SEric.Saxe@Sun.COM */
8938906SEric.Saxe@Sun.COM if (cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG)
8948906SEric.Saxe@Sun.COM break;
8958906SEric.Saxe@Sun.COM
8968906SEric.Saxe@Sun.COM disp_enq_thread = cstate_wakeup;
8978906SEric.Saxe@Sun.COM idle_cpu = cpu_idle_adaptive;
8988906SEric.Saxe@Sun.COM } else {
8998906SEric.Saxe@Sun.COM rslt = B_FALSE;
9008906SEric.Saxe@Sun.COM }
9018906SEric.Saxe@Sun.COM break;
9028906SEric.Saxe@Sun.COM
9038906SEric.Saxe@Sun.COM case CB_CODE_CPR_CHKPT:
9048906SEric.Saxe@Sun.COM idle_cpu = non_deep_idle_cpu;
9058906SEric.Saxe@Sun.COM disp_enq_thread = non_deep_idle_disp_enq_thread;
9069283SBill.Holler@Sun.COM (void) cstate_timer_callback(CB_CODE_CPR_CHKPT);
9078906SEric.Saxe@Sun.COM break;
9088906SEric.Saxe@Sun.COM
9098906SEric.Saxe@Sun.COM default:
9108906SEric.Saxe@Sun.COM cmn_err(CE_NOTE, "!cpudvr cpr_callb: invalid code %d\n", code);
9118906SEric.Saxe@Sun.COM break;
9128906SEric.Saxe@Sun.COM }
9138906SEric.Saxe@Sun.COM mutex_exit(&cpu_idle_callb_mutex);
9148906SEric.Saxe@Sun.COM return (rslt);
9158906SEric.Saxe@Sun.COM }
9168906SEric.Saxe@Sun.COM
/*
 * handle _CST notification
 *
 * Re-evaluates the ACPI C-state objects for every CPU in the notified
 * CPU's power-management domain and switches each CPU between the
 * deep (cpu_acpi_idle) and shallow (non_deep_idle_cpu) idle paths
 * according to the new maximum C-state.
 */
void
cpuidle_cstate_instance(cpu_t *cp)
{
#ifndef __xpv
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle;
	struct machcpu *mcpu;
	cpuset_t dom_cpu_set;
	kmutex_t *pm_lock;
	int result = 0;
	processorid_t cpu_id;

	if (mach_state == NULL) {
		return;
	}

	ASSERT(mach_state->ms_cstate.cma_domain != NULL);
	/* Work on a local copy of the domain's CPU set. */
	dom_cpu_set = mach_state->ms_cstate.cma_domain->pm_cpus;
	pm_lock = &mach_state->ms_cstate.cma_domain->pm_lock;

	/*
	 * Do for all the CPU's in the domain
	 */
	mutex_enter(pm_lock);
	do {
		/* Pick any CPU still in the (local) set; done when empty. */
		CPUSET_FIND(dom_cpu_set, cpu_id);
		if (cpu_id == CPUSET_NOTINSET)
			break;

		ASSERT(cpu_id >= 0 && cpu_id < NCPU);
		cp = cpu[cpu_id];
		mach_state = (cpupm_mach_state_t *)
		    cp->cpu_m.mcpu_pm_mach_state;
		/* Bail out if this CPU is not C-state managed. */
		if (!(mach_state->ms_caps & CPUPM_C_STATES)) {
			mutex_exit(pm_lock);
			return;
		}
		handle = mach_state->ms_acpi_handle;
		ASSERT(handle != NULL);

		/*
		 * re-evaluate cstate object
		 */
		if (cpu_acpi_cache_cstate_data(handle) != 0) {
			cmn_err(CE_WARN, "Cannot re-evaluate the cpu c-state"
			    " object Instance: %d", cpu_id);
		}
		mcpu = &(cp->cpu_m);
		mcpu->max_cstates = cpu_acpi_get_max_cstates(handle);
		if (mcpu->max_cstates > CPU_ACPI_C1) {
			/* Deep C-states available: use the ACPI idle path. */
			(void) cstate_timer_callback(
			    CST_EVENT_MULTIPLE_CSTATES);
			disp_enq_thread = cstate_wakeup;
			cp->cpu_m.mcpu_idle_cpu = cpu_acpi_idle;
		} else if (mcpu->max_cstates == CPU_ACPI_C1) {
			/* Only C1 left: revert to the shallow idle path. */
			disp_enq_thread = non_deep_idle_disp_enq_thread;
			cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
			(void) cstate_timer_callback(CST_EVENT_ONE_CSTATE);
		}

		/*
		 * Remove the processed CPU from the local copy; result < 0
		 * means the delete succeeded, so keep iterating.
		 */
		CPUSET_ATOMIC_XDEL(dom_cpu_set, cpu_id, result);
	} while (result < 0);
	mutex_exit(pm_lock);
#endif
}
9868906SEric.Saxe@Sun.COM
9878906SEric.Saxe@Sun.COM /*
9888906SEric.Saxe@Sun.COM * handle the number or the type of available processor power states change
9898906SEric.Saxe@Sun.COM */
9908906SEric.Saxe@Sun.COM void
cpuidle_manage_cstates(void * ctx)9918906SEric.Saxe@Sun.COM cpuidle_manage_cstates(void *ctx)
9928906SEric.Saxe@Sun.COM {
9938906SEric.Saxe@Sun.COM cpu_t *cp = ctx;
9948906SEric.Saxe@Sun.COM cpupm_mach_state_t *mach_state =
9958906SEric.Saxe@Sun.COM (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
9968906SEric.Saxe@Sun.COM boolean_t is_ready;
9978906SEric.Saxe@Sun.COM
9988906SEric.Saxe@Sun.COM if (mach_state == NULL) {
9998906SEric.Saxe@Sun.COM return;
10008906SEric.Saxe@Sun.COM }
10018906SEric.Saxe@Sun.COM
10028906SEric.Saxe@Sun.COM /*
10038906SEric.Saxe@Sun.COM * We currently refuse to power manage if the CPU is not ready to
10048906SEric.Saxe@Sun.COM * take cross calls (cross calls fail silently if CPU is not ready
10058906SEric.Saxe@Sun.COM * for it).
10068906SEric.Saxe@Sun.COM *
100710488SMark.Haywood@Sun.COM * Additionally, for x86 platforms we cannot power manage an instance,
100810488SMark.Haywood@Sun.COM * until it has been initialized.
10098906SEric.Saxe@Sun.COM */
101010488SMark.Haywood@Sun.COM is_ready = (cp->cpu_flags & CPU_READY) && cpupm_cstate_ready(cp);
10118906SEric.Saxe@Sun.COM if (!is_ready)
10128906SEric.Saxe@Sun.COM return;
10138906SEric.Saxe@Sun.COM
10148906SEric.Saxe@Sun.COM cpuidle_cstate_instance(cp);
10158906SEric.Saxe@Sun.COM }
1016