/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */

#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/stat.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/cpu_acpi.h>
#include <sys/cpu_idle.h>
#include <sys/cpupm.h>
#include <sys/cpu_event.h>
#include <sys/hpet.h>
#include <sys/archsystm.h>
#include <vm/hat_i86.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>
#include <sys/callb.h>

#define	CSTATE_USING_HPET	1
#define	CSTATE_USING_LAT	2

extern void cpu_idle_adaptive(void);
extern uint32_t cpupm_next_cstate(cma_c_state_t *cs_data,
    cpu_acpi_cstate_t *cstates, uint32_t cs_count, hrtime_t start);

static int cpu_idle_init(cpu_t *);
static void cpu_idle_fini(cpu_t *);
static void cpu_idle_stop(cpu_t *);
static boolean_t cpu_deep_idle_callb(void *arg, int code);
static boolean_t cpu_idle_cpr_callb(void *arg, int code);
static void acpi_cpu_cstate(cpu_acpi_cstate_t *cstate);

static boolean_t cstate_use_timer(hrtime_t *lapic_expire, int timer);
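/*
 * Overview: this file implements the "Generic ACPI C-state Support"
 * cpupm backend declared below in cpu_idle_ops: deep C-state entry
 * (acpi_cpu_cstate()), the idle entry point (cpu_acpi_idle()), wakeup
 * of halted CPUs (cstate_wakeup()), and the init/fini/stop plumbing.
 */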
/*
 * cpu_cstate_arat: the CPU has an always-running local APIC timer (ARAT).
 * cpu_cstate_hpet: the HPET timer is used as a proxy during deep C-states.
 */
static boolean_t cpu_cstate_arat = B_FALSE;
static boolean_t cpu_cstate_hpet = B_FALSE;

/*
 * Interfaces for modules implementing Intel's deep c-state.
 */
cpupm_state_ops_t cpu_idle_ops = {
	"Generic ACPI C-state Support",
	cpu_idle_init,
	cpu_idle_fini,
	NULL,
	cpu_idle_stop
};

static kmutex_t		cpu_idle_callb_mutex;
static callb_id_t	cpu_deep_idle_callb_id;
static callb_id_t	cpu_idle_cpr_callb_id;
static uint_t		cpu_idle_cfg_state;

static kmutex_t cpu_idle_mutex;

cpu_idle_kstat_t cpu_idle_kstat = {
	{ "address_space_id",	KSTAT_DATA_STRING },
	{ "latency",		KSTAT_DATA_UINT32 },
	{ "power",		KSTAT_DATA_UINT32 },
};

/*
 * kstat update function of the c-state info
 */
static int
cpu_idle_kstat_update(kstat_t *ksp, int flag)
{
	cpu_acpi_cstate_t *cstate = ksp->ks_private;

	if (flag == KSTAT_WRITE) {
		return (EACCES);
	}

	if (cstate->cs_addrspace_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
		    "FFixedHW");
	} else if (cstate->cs_addrspace_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
		    "SystemIO");
	} else {
		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
		    "Unsupported");
	}

	cpu_idle_kstat.cs_latency.value.ui32 = cstate->cs_latency;
	cpu_idle_kstat.cs_power.value.ui32 = cstate->cs_power;

	return (0);
}
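/*
 * One kstat per C-state is created in cpu_idle_init() below as
 * cstate:<cpu_id>:c<cs_type>, making this data observable from
 * userland, e.g. with kstat(1M):
 *
 *	$ kstat -m cstate
 *
 * latency and power are the values cached from the ACPI _CST object.
 */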
/*
 * Used during configuration callbacks to manage implementation specific
 * details of the hardware timer used during Deep C-state.
 */
boolean_t
cstate_timer_callback(int code)
{
	if (cpu_cstate_arat) {
		return (B_TRUE);
	} else if (cpu_cstate_hpet) {
		return (hpet.callback(code));
	}
	return (B_FALSE);
}

/*
 * Some Local APIC Timers do not work during Deep C-states.
 * The Deep C-state idle function uses this function to ensure it is using a
 * hardware timer that works during Deep C-states.  This function also
 * switches the timer back to the LAPIC Timer after Deep C-state.
 */
static boolean_t
cstate_use_timer(hrtime_t *lapic_expire, int timer)
{
	if (cpu_cstate_arat)
		return (B_TRUE);

	/*
	 * We have to return B_FALSE if no arat or hpet support
	 */
	if (!cpu_cstate_hpet)
		return (B_FALSE);

	switch (timer) {
	case CSTATE_USING_HPET:
		return (hpet.use_hpet_timer(lapic_expire));
	case CSTATE_USING_LAT:
		hpet.use_lapic_timer(*lapic_expire);
		return (B_TRUE);
	default:
		return (B_FALSE);
	}
}
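/*
 * Sketch of the intended call pairing, as used by acpi_cpu_cstate()
 * below:
 *
 *	cli();
 *	using_timer = cstate_use_timer(&lapic_expire, CSTATE_USING_HPET);
 *	...halt in a deep C-state, or back out...
 *	(void) cstate_use_timer(&lapic_expire, CSTATE_USING_LAT);
 *	sti();
 *
 * Every CSTATE_USING_HPET call must be matched with a CSTATE_USING_LAT
 * call on every exit path so the LAPIC timer is always restarted.
 */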
/*
 * c-state wakeup function.
 * Similar to cpu_wakeup and cpu_wakeup_mwait except this function deals
 * with CPUs asleep in MWAIT, HLT, or ACPI Deep C-State.
 */
void
cstate_wakeup(cpu_t *cp, int bound)
{
	struct machcpu	*mcpu = &(cp->cpu_m);
	volatile uint32_t *mcpu_mwait = mcpu->mcpu_mwait;
	cpupart_t	*cpu_part;
	uint_t		cpu_found;
	processorid_t	cpu_sid;

	cpu_part = cp->cpu_part;
	cpu_sid = cp->cpu_seqid;

	if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid);

		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourselves is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cp != CPU) {
			/*
			 * Use correct wakeup mechanism
			 */
			if ((mcpu_mwait != NULL) &&
			    (*mcpu_mwait == MWAIT_HALTED))
				MWAIT_WAKEUP(cp);
			else
				poke_cpu(cp->cpu_id);
		}
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;

	/*
	 * See if there are any other halted CPUs.  If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		cpu_found = bitset_find(&cpu_part->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;
	} while (bitset_atomic_test_and_del(&cpu_part->cp_haltset,
	    cpu_found) < 0);

	/*
	 * Must use correct wakeup mechanism to avoid lost wakeup of
	 * alternate cpu.
	 */
	if (cpu_found != CPU->cpu_seqid) {
		mcpu_mwait = cpu[cpu_found]->cpu_m.mcpu_mwait;
		if ((mcpu_mwait != NULL) && (*mcpu_mwait == MWAIT_HALTED))
			MWAIT_WAKEUP(cpu_seq[cpu_found]);
		else
			poke_cpu(cpu_seq[cpu_found]->cpu_id);
	}
}
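/*
 * cstate_wakeup() is installed as the dispatcher's disp_enq_thread hook
 * (see cpu_deep_idle_callb() and cpuidle_cstate_instance() below), so it
 * runs whenever a runnable thread is enqueued while deep C-states are
 * enabled.
 */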
/*
 * Function called by CPU idle notification framework to check whether CPU
 * has been awakened.  It will be called with interrupts disabled.
 * If CPU has been awakened, call cpu_idle_exit() to notify CPU idle
 * notification framework.
 */
static void
acpi_cpu_mwait_check_wakeup(void *arg)
{
	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;

	ASSERT(arg != NULL);
	if (*mcpu_mwait != MWAIT_HALTED) {
		/*
		 * CPU has been awakened, notify CPU idle notification system.
		 */
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	} else {
		/*
		 * Toggle interrupt flag to detect pending interrupts.
		 * If interrupt happened, do_interrupt() will notify CPU idle
		 * notification framework so no need to call cpu_idle_exit()
		 * here.
		 */
		sti();
		SMT_PAUSE();
		cli();
	}
}

static void
acpi_cpu_mwait_ipi_check_wakeup(void *arg)
{
	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;

	ASSERT(arg != NULL);
	if (*mcpu_mwait != MWAIT_WAKEUP_IPI) {
		/*
		 * CPU has been awakened, notify CPU idle notification system.
		 */
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	} else {
		/*
		 * Toggle interrupt flag to detect pending interrupts.
		 * If interrupt happened, do_interrupt() will notify CPU idle
		 * notification framework so no need to call cpu_idle_exit()
		 * here.
		 */
		sti();
		SMT_PAUSE();
		cli();
	}
}
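/*
 * The two check functions above differ only in the wakeup tag they test.
 * acpi_cpu_cstate() writes MWAIT_WAKEUP_IPI into mcpu_mwait for SystemIO
 * C-states (exited via IPI) and MWAIT_HALTED for FFixedHW C-states
 * (exited via a store to the monitored line), and registers the matching
 * check function with cpu_idle_enter().
 */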
/*ARGSUSED*/
static void
acpi_cpu_check_wakeup(void *arg)
{
	/*
	 * Toggle interrupt flag to detect pending interrupts.
	 * If interrupt happened, do_interrupt() will notify CPU idle
	 * notification framework so no need to call cpu_idle_exit() here.
	 */
	sti();
	SMT_PAUSE();
	cli();
}
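/*
 * The check functions above are passed to cpu_idle_enter() as its
 * check_func argument (see acpi_cpu_cstate() below).  The
 * sti()/SMT_PAUSE()/cli() sequence briefly opens the interrupt window;
 * an interrupt that was pending is taken during SMT_PAUSE(), and its
 * do_interrupt() path performs the idle-exit notification for us.
 */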
/*
 * enter deep c-state handler
 */
static void
acpi_cpu_cstate(cpu_acpi_cstate_t *cstate)
{
	volatile uint32_t	*mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	cpu_t			*cpup = CPU;
	processorid_t		cpu_sid = cpup->cpu_seqid;
	cpupart_t		*cp = cpup->cpu_part;
	hrtime_t		lapic_expire;
	uint8_t			type = cstate->cs_addrspace_id;
	uint32_t		cs_type = cstate->cs_type;
	int			hset_update = 1;
	boolean_t		using_timer;
	cpu_idle_check_wakeup_t check_func = &acpi_cpu_check_wakeup;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait.  No other cpu will
	 * attempt to set our mcpu_mwait until we add ourselves to the
	 * haltset.
	 */
	if (mcpu_mwait) {
		if (type == ACPI_ADR_SPACE_SYSTEM_IO) {
			*mcpu_mwait = MWAIT_WAKEUP_IPI;
			check_func = &acpi_cpu_mwait_ipi_check_wakeup;
		} else {
			*mcpu_mwait = MWAIT_HALTED;
			check_func = &acpi_cpu_mwait_check_wakeup;
		}
	}

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awakened. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a write to our mcpu_mwait.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * The local APIC timer can stop in ACPI C2 and deeper c-states.
	 * Try to program the HPET hardware to substitute for this CPU's
	 * LAPIC timer.
	 * cstate_use_timer() could disable the LAPIC Timer.  Make sure
	 * to start the LAPIC Timer again before leaving this function.
	 *
	 * Disable interrupts here so we will awaken immediately after halting
	 * if someone tries to poke us between now and the time we actually
	 * halt.
	 */
	cli();
	using_timer = cstate_use_timer(&lapic_expire, CSTATE_USING_HPET);

	/*
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return.  If the bit is cleared after
	 * we check, then cstate_wakeup() will pop us out of the halted
	 * state.
	 *
	 * This means that the ordering of the cstate_wakeup() and the clearing
	 * of the bit by cpu_wakeup is important.
	 * cpu_wakeup() must clear our cp_haltset bit, and then call
	 * cstate_wakeup().
	 * acpi_cpu_cstate() must disable interrupts, then check for the bit.
	 */
	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	if (using_timer == B_FALSE) {

		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();

		/*
		 * We are currently unable to program the HPET to act as this
		 * CPU's proxy LAPIC timer.  This CPU cannot enter C2 or deeper
		 * because no timer is set to wake it up while its LAPIC timer
		 * stalls in deep C-States.
		 * Enter C1 instead.
		 *
		 * cstate_wakeup() will wake this CPU with an IPI, which
		 * works with MWAIT.
		 */
		i86_monitor(mcpu_mwait, 0, 0);
		if ((*mcpu_mwait & ~MWAIT_WAKEUP_IPI) == MWAIT_HALTED) {
			if (cpu_idle_enter(IDLE_STATE_C1, 0,
			    check_func, (void *)mcpu_mwait) == 0) {
				if ((*mcpu_mwait & ~MWAIT_WAKEUP_IPI) ==
				    MWAIT_HALTED) {
					i86_mwait(0, 0);
				}
				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
			}
		}

		/*
		 * We're no longer halted
		 */
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	if (type == ACPI_ADR_SPACE_FIXED_HARDWARE) {
		/*
		 * We're on our way to being halted.
		 * To avoid a lost wakeup, arm the monitor before checking
		 * if another cpu wrote to mcpu_mwait to wake us up.
		 */
		i86_monitor(mcpu_mwait, 0, 0);
		if (*mcpu_mwait == MWAIT_HALTED) {
			if (cpu_idle_enter((uint_t)cs_type, 0,
			    check_func, (void *)mcpu_mwait) == 0) {
				if (*mcpu_mwait == MWAIT_HALTED) {
					i86_mwait(cstate->cs_address, 1);
				}
				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
			}
		}
	} else if (type == ACPI_ADR_SPACE_SYSTEM_IO) {
		uint32_t value;
		ACPI_TABLE_FADT *gbl_FADT;

		if (*mcpu_mwait == MWAIT_WAKEUP_IPI) {
			if (cpu_idle_enter((uint_t)cs_type, 0,
			    check_func, (void *)mcpu_mwait) == 0) {
				if (*mcpu_mwait == MWAIT_WAKEUP_IPI) {
					(void) cpu_acpi_read_port(
					    cstate->cs_address, &value, 8);
					acpica_get_global_FADT(&gbl_FADT);
					(void) cpu_acpi_read_port(
					    gbl_FADT->XPmTimerBlock.Address,
					    &value, 32);
				}
				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
			}
		}
	}

	/*
	 * The LAPIC timer may have stopped in deep c-state.
	 * Reprogram this CPU's LAPIC here before enabling interrupts.
	 */
	(void) cstate_use_timer(&lapic_expire, CSTATE_USING_LAT);
	sti();

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
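/*
 * In outline, the entry protocol implemented by acpi_cpu_cstate() is:
 *
 *  1. Tag mcpu_mwait (MWAIT_WAKEUP_IPI or MWAIT_HALTED) and select the
 *     matching wakeup-check function.
 *  2. Join the partition's halted set (cp_haltset) so other CPUs can
 *     find and wake us.
 *  3. Back out early if disp_anywork() finds work.
 *  4. cli(), then try to proxy the LAPIC timer with the HPET.
 *  5. Re-check cp_haltset and disp_nrunnable; on failure restore the
 *     LAPIC timer and back out.
 *  6. Halt: MONITOR/MWAIT for FFixedHW states, a port read followed by
 *     a PM timer read for SystemIO states, or C1 if no proxy timer is
 *     available.
 *  7. Restore the LAPIC timer, sti(), and leave cp_haltset.
 */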
/*
 * Idle the present CPU; deep c-states are supported.
 */
void
cpu_acpi_idle(void)
{
	cpu_t *cp = CPU;
	cpu_acpi_handle_t handle;
	cma_c_state_t *cs_data;
	cpu_acpi_cstate_t *cstates;
	hrtime_t start, end;
	int cpu_max_cstates;
	uint32_t cs_indx;
	uint16_t cs_type;

	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	handle = mach_state->ms_acpi_handle;
	ASSERT(CPU_ACPI_CSTATES(handle) != NULL);

	cs_data = mach_state->ms_cstate.cma_state.cstate;
	cstates = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	ASSERT(cstates != NULL);
	cpu_max_cstates = cpu_acpi_get_max_cstates(handle);
	if (cpu_max_cstates > CPU_MAX_CSTATES)
		cpu_max_cstates = CPU_MAX_CSTATES;
	if (cpu_max_cstates == 1) {	/* no ACPI c-state data */
		(*non_deep_idle_cpu)();
		return;
	}

	start = gethrtime_unscaled();

	cs_indx = cpupm_next_cstate(cs_data, cstates, cpu_max_cstates, start);

	cs_type = cstates[cs_indx].cs_type;

	switch (cs_type) {
	default:
		/* FALLTHROUGH */
	case CPU_ACPI_C1:
		(*non_deep_idle_cpu)();
		break;

	case CPU_ACPI_C2:
		acpi_cpu_cstate(&cstates[cs_indx]);
		break;

	case CPU_ACPI_C3:
		/*
		 * All supported Intel processors maintain cache coherency
		 * during C3.  Currently when entering C3 processors flush
		 * core caches to higher level shared cache. The shared cache
		 * maintains state and supports probes during C3.
		 * Consequently there is no need to handle cache coherency
		 * and Bus Master activity here with the cache flush, BM_RLD
		 * bit, BM_STS bit, nor PM2_CNT.ARB_DIS mechanisms described
		 * in section 8.1.4 of the ACPI Specification 4.0.
		 */
		acpi_cpu_cstate(&cstates[cs_indx]);
		break;
	}

	end = gethrtime_unscaled();

	/*
	 * Update statistics
	 */
	cpupm_wakeup_cstate_data(cs_data, end);
}

boolean_t
cpu_deep_cstates_supported(void)
{
	extern int	idle_cpu_no_deep_c;

	if (idle_cpu_no_deep_c)
		return (B_FALSE);

	if (!cpuid_deep_cstates_supported())
		return (B_FALSE);

	if (cpuid_arat_supported()) {
		cpu_cstate_arat = B_TRUE;
		return (B_TRUE);
	}

	if ((hpet.supported == HPET_FULL_SUPPORT) &&
	    hpet.install_proxy()) {
		cpu_cstate_hpet = B_TRUE;
		return (B_TRUE);
	}

	return (B_FALSE);
}
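/*
 * Note that cpu_deep_cstates_supported() prefers ARAT over the HPET:
 * with an always-running APIC timer no proxy timer is needed at all, so
 * the HPET proxy is only installed when ARAT is absent and the HPET is
 * fully supported.
 */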
/*
 * Validate that this processor supports deep cstate and if so,
 * get the c-state data from ACPI and cache it.
 */
static int
cpu_idle_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	char name[KSTAT_STRLEN];
	int cpu_max_cstates, i;
	int ret;

	/*
	 * Cache the C-state specific ACPI data.
	 */
	if ((ret = cpu_acpi_cache_cstate_data(handle)) != 0) {
		if (ret < 0)
			cmn_err(CE_NOTE,
			    "!Support for CPU deep idle states is being "
			    "disabled due to errors parsing ACPI C-state "
			    "objects exported by BIOS.");
		cpu_idle_fini(cp);
		return (-1);
	}

	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);

	cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

	for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
		(void) snprintf(name, KSTAT_STRLEN - 1, "c%d",
		    cstate->cs_type);
		/*
		 * Allocate, initialize and install cstate kstat
		 */
		cstate->cs_ksp = kstat_create("cstate", CPU->cpu_id,
		    name, "misc",
		    KSTAT_TYPE_NAMED,
		    sizeof (cpu_idle_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);

		if (cstate->cs_ksp == NULL) {
			cmn_err(CE_NOTE, "kstat_create(c_state) fail");
		} else {
			cstate->cs_ksp->ks_data = &cpu_idle_kstat;
			cstate->cs_ksp->ks_lock = &cpu_idle_mutex;
			cstate->cs_ksp->ks_update = cpu_idle_kstat_update;
			cstate->cs_ksp->ks_data_size += MAXNAMELEN;
			cstate->cs_ksp->ks_private = cstate;
			kstat_install(cstate->cs_ksp);
			cstate++;
		}
	}

	cpupm_alloc_domains(cp, CPUPM_C_STATES);
	cpupm_alloc_ms_cstate(cp);

	if (cpu_deep_cstates_supported()) {
		uint32_t value;

		mutex_enter(&cpu_idle_callb_mutex);
		if (cpu_deep_idle_callb_id == (callb_id_t)0)
			cpu_deep_idle_callb_id = callb_add(&cpu_deep_idle_callb,
			    (void *)NULL, CB_CL_CPU_DEEP_IDLE, "cpu_deep_idle");
		if (cpu_idle_cpr_callb_id == (callb_id_t)0)
			cpu_idle_cpr_callb_id = callb_add(&cpu_idle_cpr_callb,
			    (void *)NULL, CB_CL_CPR_PM, "cpu_idle_cpr");
		mutex_exit(&cpu_idle_callb_mutex);


		/*
		 * All supported CPUs (Nehalem and later) will remain in C3
		 * during Bus Master activity.
		 * All CPUs set ACPI_BITREG_BUS_MASTER_RLD to 0 here if it
		 * is not already 0 before enabling Deeper C-states.
		 */
		cpu_acpi_get_register(ACPI_BITREG_BUS_MASTER_RLD, &value);
		if (value & 1)
			cpu_acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	return (0);
}

/*
 * Free resources allocated by cpu_idle_init().
 */
static void
cpu_idle_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	uint_t	cpu_max_cstates, i;

	/*
	 * idle cpu points back to the generic one
	 */
	idle_cpu = cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
	disp_enq_thread = non_deep_idle_disp_enq_thread;

	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate) {
		cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

		for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
			if (cstate->cs_ksp != NULL)
				kstat_delete(cstate->cs_ksp);
			cstate++;
		}
	}

	cpupm_free_ms_cstate(cp);
	cpupm_free_domains(&cpupm_cstate_domains);
	cpu_acpi_free_cstate_data(handle);

	mutex_enter(&cpu_idle_callb_mutex);
	if (cpu_deep_idle_callb_id != (callb_id_t)0) {
		(void) callb_delete(cpu_deep_idle_callb_id);
		cpu_deep_idle_callb_id = (callb_id_t)0;
	}
	if (cpu_idle_cpr_callb_id != (callb_id_t)0) {
		(void) callb_delete(cpu_idle_cpr_callb_id);
		cpu_idle_cpr_callb_id = (callb_id_t)0;
	}
	mutex_exit(&cpu_idle_callb_mutex);
}
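/*
 * cpu_idle_fini() above is the full teardown (it is also used on the
 * cpu_idle_init() failure path), while cpu_idle_stop() below is the
 * per-CPU stop entry published in cpu_idle_ops.  Unlike fini, stop
 * pauses all CPUs while swapping in the non-deep idle routine, so no
 * CPU can be executing the deep-idle path when its C-state data is
 * freed.
 */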
static void
cpu_idle_stop(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	uint_t cpu_max_cstates, i;

	/*
	 * Place the CPUs in a safe place so that we can disable
	 * deep c-state on them.
	 */
	pause_cpus(NULL);
	cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
	start_cpus();

	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate) {
		cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

		for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
			if (cstate->cs_ksp != NULL)
				kstat_delete(cstate->cs_ksp);
			cstate++;
		}
	}
	cpupm_free_ms_cstate(cp);
	cpupm_remove_domains(cp, CPUPM_C_STATES, &cpupm_cstate_domains);
	cpu_acpi_free_cstate_data(handle);
}

/*ARGSUSED*/
static boolean_t
cpu_deep_idle_callb(void *arg, int code)
{
	boolean_t rslt = B_TRUE;

	mutex_enter(&cpu_idle_callb_mutex);
	switch (code) {
	case PM_DEFAULT_CPU_DEEP_IDLE:
		/*
		 * Default policy is same as enable
		 */
		/*FALLTHROUGH*/
	case PM_ENABLE_CPU_DEEP_IDLE:
		if ((cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG) == 0)
			break;

		if (cstate_timer_callback(PM_ENABLE_CPU_DEEP_IDLE)) {
			disp_enq_thread = cstate_wakeup;
			idle_cpu = cpu_idle_adaptive;
			cpu_idle_cfg_state &= ~CPU_IDLE_DEEP_CFG;
		} else {
			rslt = B_FALSE;
		}
		break;

	case PM_DISABLE_CPU_DEEP_IDLE:
		if (cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG)
			break;

		idle_cpu = non_deep_idle_cpu;
		if (cstate_timer_callback(PM_DISABLE_CPU_DEEP_IDLE)) {
			disp_enq_thread = non_deep_idle_disp_enq_thread;
			cpu_idle_cfg_state |= CPU_IDLE_DEEP_CFG;
		}
		break;

	default:
		cmn_err(CE_NOTE, "!cpu deep_idle_callb: invalid code %d\n",
		    code);
		break;
	}
	mutex_exit(&cpu_idle_callb_mutex);
	return (rslt);
}
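/*
 * cpu_deep_idle_callb() above is registered in cpu_idle_init() under
 * the CB_CL_CPU_DEEP_IDLE callback class, letting the PM framework
 * toggle deep C-states system-wide.  CPU_IDLE_DEEP_CFG in
 * cpu_idle_cfg_state records "deep idle disabled", which is why the
 * enable path clears the bit and the disable path sets it.
 */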
/*ARGSUSED*/
static boolean_t
cpu_idle_cpr_callb(void *arg, int code)
{
	boolean_t rslt = B_TRUE;

	mutex_enter(&cpu_idle_callb_mutex);
	switch (code) {
	case CB_CODE_CPR_RESUME:
		if (cstate_timer_callback(CB_CODE_CPR_RESUME)) {
			/*
			 * Do not enable dispatcher hooks if disabled by user.
			 */
			if (cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG)
				break;

			disp_enq_thread = cstate_wakeup;
			idle_cpu = cpu_idle_adaptive;
		} else {
			rslt = B_FALSE;
		}
		break;

	case CB_CODE_CPR_CHKPT:
		idle_cpu = non_deep_idle_cpu;
		disp_enq_thread = non_deep_idle_disp_enq_thread;
		(void) cstate_timer_callback(CB_CODE_CPR_CHKPT);
		break;

	default:
		cmn_err(CE_NOTE, "!cpudvr cpr_callb: invalid code %d\n", code);
		break;
	}
	mutex_exit(&cpu_idle_callb_mutex);
	return (rslt);
}

/*
 * handle _CST notification
 */
void
cpuidle_cstate_instance(cpu_t *cp)
{
#ifndef	__xpv
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle;
	struct machcpu *mcpu;
	cpuset_t dom_cpu_set;
	kmutex_t *pm_lock;
	int result = 0;
	processorid_t cpu_id;

	if (mach_state == NULL) {
		return;
	}

	ASSERT(mach_state->ms_cstate.cma_domain != NULL);
	dom_cpu_set = mach_state->ms_cstate.cma_domain->pm_cpus;
	pm_lock = &mach_state->ms_cstate.cma_domain->pm_lock;

	/*
	 * Do for all the CPUs in the domain
	 */
	mutex_enter(pm_lock);
	do {
		CPUSET_FIND(dom_cpu_set, cpu_id);
		if (cpu_id == CPUSET_NOTINSET)
			break;

		ASSERT(cpu_id >= 0 && cpu_id < NCPU);
		cp = cpu[cpu_id];
		mach_state = (cpupm_mach_state_t *)
		    cp->cpu_m.mcpu_pm_mach_state;
		if (!(mach_state->ms_caps & CPUPM_C_STATES)) {
			mutex_exit(pm_lock);
			return;
		}
		handle = mach_state->ms_acpi_handle;
		ASSERT(handle != NULL);

		/*
		 * re-evaluate cstate object
		 */
		if (cpu_acpi_cache_cstate_data(handle) != 0) {
			cmn_err(CE_WARN, "Cannot re-evaluate the cpu c-state"
			    " object Instance: %d", cpu_id);
		}
		mutex_enter(&cpu_lock);
		mcpu = &(cp->cpu_m);
		mcpu->max_cstates = cpu_acpi_get_max_cstates(handle);
		if (mcpu->max_cstates > CPU_ACPI_C1) {
			(void) cstate_timer_callback(
			    CST_EVENT_MULTIPLE_CSTATES);
			disp_enq_thread = cstate_wakeup;
			cp->cpu_m.mcpu_idle_cpu = cpu_acpi_idle;
		} else if (mcpu->max_cstates == CPU_ACPI_C1) {
			disp_enq_thread = non_deep_idle_disp_enq_thread;
			cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
			(void) cstate_timer_callback(CST_EVENT_ONE_CSTATE);
		}
		mutex_exit(&cpu_lock);

		CPUSET_ATOMIC_XDEL(dom_cpu_set, cpu_id, result);
	} while (result < 0);
	mutex_exit(pm_lock);
#endif
}

/*
 * Handle a change in the number or type of the available processor power
 * states.
 */
void
cpuidle_manage_cstates(void *ctx)
{
	cpu_t *cp = ctx;
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	boolean_t is_ready;

	if (mach_state == NULL) {
		return;
	}

	/*
	 * We currently refuse to power manage if the CPU is not ready to
	 * take cross calls (cross calls fail silently if CPU is not ready
	 * for it).
	 *
	 * Additionally, for x86 platforms we cannot power manage an
	 * instance until it has been initialized.
	 */
	is_ready = (cp->cpu_flags & CPU_READY) && cpupm_cstate_ready(cp);
	if (!is_ready)
		return;

	cpuidle_cstate_instance(cp);
}