/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2009-2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2025 Oxide Computer Company
 */

#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/stat.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/cpu_acpi.h>
#include <sys/cpu_idle.h>
#include <sys/cpupm.h>
#include <sys/cpu_event.h>
#include <sys/hpet.h>
#include <sys/archsystm.h>
#include <vm/hat_i86.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>
#include <sys/callb.h>

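/*
 * Values for the "timer" argument to cstate_use_timer(): switch this CPU's
 * wakeup source to the proxying HPET before halting, or back to the local
 * APIC timer afterwards.
 */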
#define	CSTATE_USING_HPET		1
#define	CSTATE_USING_LAT		2

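/*
 * Number of 10us polling intervals cpu_idle_stop() waits for a target CPU to
 * switch to the non-deep idle routine before it complains (roughly every
 * 10ms of waiting).
 */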
#define	CPU_IDLE_STOP_TIMEOUT		1000

extern void cpu_idle_adaptive(void);
extern uint32_t cpupm_next_cstate(cma_c_state_t *cs_data,
    cpu_acpi_cstate_t *cstates, uint32_t cs_count, hrtime_t start);

static int cpu_idle_init(cpu_t *);
static void cpu_idle_fini(cpu_t *);
static void cpu_idle_stop(cpu_t *);
static boolean_t cpu_deep_idle_callb(void *arg, int code);
static boolean_t cpu_idle_cpr_callb(void *arg, int code);
static void acpi_cpu_cstate(cpu_acpi_cstate_t *cstate);

static boolean_t cstate_use_timer(hrtime_t *lapic_expire, int timer);

/*
 * Flags indicating, respectively, that the local APIC timer is always
 * running (ARAT) and that the HPET is used as the proxy timer in deep
 * C-states.
 */
static boolean_t cpu_cstate_arat = B_FALSE;
static boolean_t cpu_cstate_hpet = B_FALSE;

/*
 * Interfaces for modules implementing Intel's deep c-state.
 */
cpupm_state_ops_t cpu_idle_ops = {
	"Generic ACPI C-state Support",
	cpu_idle_init,
	cpu_idle_fini,
	NULL,
	cpu_idle_stop
};

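/*
 * cpu_idle_callb_mutex protects registration of the deep-idle and CPR
 * callbacks below and the cpu_idle_cfg_state flags; cpu_idle_mutex is the
 * ks_lock used to serialize updates of the shared c-state kstat template.
 */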
static kmutex_t cpu_idle_callb_mutex;
static callb_id_t cpu_deep_idle_callb_id;
static callb_id_t cpu_idle_cpr_callb_id;
static uint_t cpu_idle_cfg_state;

static kmutex_t cpu_idle_mutex;

cpu_idle_kstat_t cpu_idle_kstat = {
	{ "address_space_id",	KSTAT_DATA_STRING },
	{ "latency",		KSTAT_DATA_UINT32 },
	{ "power",		KSTAT_DATA_UINT32 },
};

/*
 * kstat update function of the c-state info
 */
static int
cpu_idle_kstat_update(kstat_t *ksp, int flag)
{
	cpu_acpi_cstate_t *cstate = ksp->ks_private;

	if (flag == KSTAT_WRITE) {
		return (EACCES);
	}

	if (cstate->cs_addrspace_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
		    "FFixedHW");
	} else if (cstate->cs_addrspace_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
		    "SystemIO");
	} else {
		kstat_named_setstr(&cpu_idle_kstat.addr_space_id,
		    "Unsupported");
	}

	cpu_idle_kstat.cs_latency.value.ui32 = cstate->cs_latency;
	cpu_idle_kstat.cs_power.value.ui32 = cstate->cs_power;

	return (0);
}

/*
 * Used during configuration callbacks to manage implementation specific
 * details of the hardware timer used during Deep C-state.
 */
boolean_t
cstate_timer_callback(int code)
{
	if (cpu_cstate_arat) {
		return (B_TRUE);
	} else if (cpu_cstate_hpet) {
		return (hpet.callback(code));
	}
	return (B_FALSE);
}

/*
 * Some Local APIC Timers do not work during Deep C-states.
 * The Deep C-state idle function uses this function to ensure it is using a
 * hardware timer that works during Deep C-states. This function also
 * switches the timer back to the LAPIC Timer after Deep C-state.
 */
static boolean_t
cstate_use_timer(hrtime_t *lapic_expire, int timer)
{
	if (cpu_cstate_arat)
		return (B_TRUE);

	/*
	 * We have to return B_FALSE if no arat or hpet support
	 */
	if (!cpu_cstate_hpet)
		return (B_FALSE);

	switch (timer) {
	case CSTATE_USING_HPET:
		return (hpet.use_hpet_timer(lapic_expire));
	case CSTATE_USING_LAT:
		hpet.use_lapic_timer(*lapic_expire);
		return (B_TRUE);
	default:
		return (B_FALSE);
	}
}

/*
 * c-state wakeup function.
 * Similar to cpu_wakeup and cpu_wakeup_mwait except this function deals
 * with CPUs asleep in MWAIT, HLT, or ACPI Deep C-State.
 */
void
cstate_wakeup(cpu_t *cp, int bound)
{
	struct machcpu	*mcpu = &(cp->cpu_m);
	volatile uint32_t *mcpu_mwait = mcpu->mcpu_mwait;
	cpupart_t	*cpu_part;
	uint_t		cpu_found;
	processorid_t	cpu_sid;

	cpu_part = cp->cpu_part;
	cpu_sid = cp->cpu_seqid;
	/*
	 * Clear the halted bit for that CPU since it will be woken up
	 * in a moment.
	 */
	if (bitset_in_set(&cpu_part->cp_haltset, cpu_sid)) {
		/*
		 * Clear the halted bit for that CPU since it will be
		 * poked in a moment.
		 */
		bitset_atomic_del(&cpu_part->cp_haltset, cpu_sid);

		/*
		 * We may find the current CPU present in the halted cpuset
		 * if we're in the context of an interrupt that occurred
		 * before we had a chance to clear our bit in cpu_idle().
		 * Waking ourself is obviously unnecessary, since if
		 * we're here, we're not halted.
		 */
		if (cp != CPU) {
			/*
			 * Use correct wakeup mechanism
			 */
			if ((mcpu_mwait != NULL) &&
			    (*mcpu_mwait == MWAIT_HALTED))
				MWAIT_WAKEUP(cp);
			else
				poke_cpu(cp->cpu_id);
		}
		return;
	} else {
		/*
		 * This cpu isn't halted, but it's idle or undergoing a
		 * context switch. No need to awaken anyone else.
		 */
		if (cp->cpu_thread == cp->cpu_idle_thread ||
		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
			return;
	}

	/*
	 * No need to wake up other CPUs if the thread we just enqueued
	 * is bound.
	 */
	if (bound)
		return;


	/*
	 * See if there are any other halted CPUs. If there are, then
	 * select one, and awaken it.
	 * It's possible that after we find a CPU, somebody else
	 * will awaken it before we get the chance.
	 * In that case, look again.
	 */
	do {
		cpu_found = bitset_find(&cpu_part->cp_haltset);
		if (cpu_found == (uint_t)-1)
			return;

	} while (bitset_atomic_test_and_del(&cpu_part->cp_haltset,
	    cpu_found) < 0);

	/*
	 * Must use correct wakeup mechanism to avoid lost wakeup of
	 * alternate cpu.
	 */
	if (cpu_found != CPU->cpu_seqid) {
		mcpu_mwait = cpu_seq[cpu_found]->cpu_m.mcpu_mwait;
		if ((mcpu_mwait != NULL) && (*mcpu_mwait == MWAIT_HALTED))
			MWAIT_WAKEUP(cpu_seq[cpu_found]);
		else
			poke_cpu(cpu_seq[cpu_found]->cpu_id);
	}
}

/*
 * Function called by the CPU idle notification framework to check whether a
 * CPU has been awakened. It will be called with interrupts disabled.
 * If the CPU has been awakened, call cpu_idle_exit() to notify the CPU idle
 * notification framework.
 */
static void
acpi_cpu_mwait_check_wakeup(void *arg)
{
	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;

	ASSERT(arg != NULL);
	if (*mcpu_mwait != MWAIT_HALTED) {
		/*
		 * CPU has been awakened, notify CPU idle notification system.
		 */
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	} else {
		/*
		 * Toggle interrupt flag to detect pending interrupts.
		 * If interrupt happened, do_interrupt() will notify CPU idle
		 * notification framework so no need to call cpu_idle_exit()
		 * here.
		 */
		sti();
		SMT_PAUSE();
		cli();
	}
}

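/*
 * As above, but for a CPU idled via the ACPI System I/O path, where the
 * mcpu_mwait wakeup token is MWAIT_WAKEUP_IPI rather than MWAIT_HALTED
 * (see the mwait_idle_state selection in acpi_cpu_cstate()).
 */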
static void
acpi_cpu_mwait_ipi_check_wakeup(void *arg)
{
	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;

	ASSERT(arg != NULL);
	if (*mcpu_mwait != MWAIT_WAKEUP_IPI) {
		/*
		 * CPU has been awakened, notify CPU idle notification system.
		 */
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	} else {
		/*
		 * Toggle interrupt flag to detect pending interrupts.
		 * If interrupt happened, do_interrupt() will notify CPU idle
		 * notification framework so no need to call cpu_idle_exit()
		 * here.
		 */
		sti();
		SMT_PAUSE();
		cli();
	}
}

/*ARGSUSED*/
static void
acpi_cpu_check_wakeup(void *arg)
{
	/*
	 * Toggle interrupt flag to detect pending interrupts.
	 * If interrupt happened, do_interrupt() will notify CPU idle
	 * notification framework so no need to call cpu_idle_exit() here.
	 */
	sti();
	SMT_PAUSE();
	cli();
}

/*
 * Idle the current CPU via ACPI-defined System I/O read to an ACPI-specified
 * address.
 */
static void
acpi_io_idle(uint32_t address)
{
	uint32_t value;
	ACPI_TABLE_FADT *gbl_FADT;

	/*
	 * Do we need to work around an ancient chipset bug in early ACPI
	 * implementations that would result in a late STPCLK# assertion?
	 *
	 * Must be true when running on systems where the ACPI-indicated I/O
	 * read to enter low-power states may resolve before actually stopping
	 * the processor that initiated a low-power transition. On such systems,
	 * it is possible the processor would proceed past the idle point and
	 * *then* be stopped.
	 *
	 * An early workaround that has been carried forward is to read the ACPI
	 * PM Timer after requesting a low-power transition. The timer read will
	 * take long enough that we are certain the processor is safe to be
	 * stopped.
	 *
	 * From some investigation, this was only ever necessary on older Intel
	 * chipsets. Additionally, the timer read can take upwards of a thousand
	 * CPU clocks, so for systems that work correctly, it's just a tarpit
	 * for the CPU as it is woken back up.
	 */
	boolean_t need_stpclk_workaround =
	    cpuid_getvendor(CPU) == X86_VENDOR_Intel;

	/*
	 * The following call will cause us to halt which will cause the store
	 * buffer to be repartitioned, potentially exposing us to the Intel CPU
	 * vulnerability MDS. As such, we need to explicitly call that here.
	 * The other idle methods do this automatically as part of the
	 * implementation of i86_mwait().
	 */
	x86_md_clear();
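	/*
	 * For SystemIO C-states, the byte-wide read from the ACPI-provided
	 * port is itself what requests the low-power transition; the value
	 * returned by the read is not used.
	 */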
	(void) cpu_acpi_read_port(address, &value, 8);
	if (need_stpclk_workaround) {
		acpica_get_global_FADT(&gbl_FADT);
		(void) cpu_acpi_read_port(
		    gbl_FADT->XPmTimerBlock.Address,
		    &value, 32);
	}
}

/*
 * enter deep c-state handler
 */
static void
acpi_cpu_cstate(cpu_acpi_cstate_t *cstate)
{
	volatile uint32_t *mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	uint32_t mwait_idle_state;
	cpu_t *cpup = CPU;
	processorid_t cpu_sid = cpup->cpu_seqid;
	cpupart_t *cp = cpup->cpu_part;
	hrtime_t lapic_expire;
	uint8_t type = cstate->cs_addrspace_id;
	uint32_t cs_type = cstate->cs_type;
	int hset_update = 1;
	boolean_t using_timer;
	cpu_idle_check_wakeup_t check_func = &acpi_cpu_check_wakeup;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait. No other cpu will
	 * attempt to set our mcpu_mwait until we add ourself to the haltset.
	 */
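	/*
	 * For the System I/O entry method the CPU is not parked in MWAIT, so
	 * a store to mcpu_mwait alone cannot wake it; cstate_wakeup() falls
	 * back to poking it with an IPI, and the wakeup token used here is
	 * MWAIT_WAKEUP_IPI accordingly.
	 */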
	if (mcpu_mwait != NULL) {
		if (type == ACPI_ADR_SPACE_SYSTEM_IO) {
			mwait_idle_state = MWAIT_WAKEUP_IPI;
			check_func = &acpi_cpu_mwait_ipi_check_wakeup;
		} else {
			mwait_idle_state = MWAIT_HALTED;
			check_func = &acpi_cpu_mwait_check_wakeup;
		}
		*mcpu_mwait = mwait_idle_state;
	} else {
		/*
		 * Initialize mwait_idle_state, but with mcpu_mwait NULL we'll
		 * never actually use it here. "MWAIT_RUNNING" just
		 * distinguishes from the "WAKEUP_IPI" and "HALTED" cases above.
		 */
		mwait_idle_state = MWAIT_RUNNING;
	}

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awakened. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do. Work destined for
	 * this CPU may become available after this check. If we're
	 * mwait-halting we'll be notified through the clearing of our bit in
	 * the halted CPU bitmask, and a write to our mcpu_mwait. Otherwise,
	 * we're hlt-based halting, and we'll be immediately woken by the
	 * pending interrupt.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * The local APIC timer can stop in ACPI C2 and deeper c-states.
	 * Try to program the HPET hardware to substitute for this CPU's
	 * LAPIC timer.
	 * cstate_use_timer() could disable the LAPIC Timer. Make sure
	 * to start the LAPIC Timer again before leaving this function.
	 *
	 * Disable interrupts here so we will awaken immediately after halting
	 * if someone tries to poke us between now and the time we actually
	 * halt.
	 */
	cli();
	using_timer = cstate_use_timer(&lapic_expire, CSTATE_USING_HPET);

	/*
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check then the cstate_wakeup() will pop us out of the halted
	 * state.
	 *
	 * This means that the ordering of the cstate_wakeup() and the clearing
	 * of the bit by cpu_wakeup is important.
	 * cpu_wakeup() must clear our mc_haltset bit, and then call
	 * cstate_wakeup().
	 * acpi_cpu_cstate() must disable interrupts, then check for the bit.
	 */
	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	if (using_timer == B_FALSE) {

		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();

		/*
		 * We are currently unable to program the HPET to act as this
		 * CPU's proxy LAPIC timer. This CPU cannot enter C2 or deeper
		 * because no timer is set to wake it up while its LAPIC timer
		 * stalls in deep C-States.
		 * Enter C1 instead.
		 *
		 * cstate_wakeup() will wake this CPU with an IPI, which works
		 * with either MWAIT or HLT.
		 */
		if (mcpu_mwait != NULL) {
			i86_monitor(mcpu_mwait, 0, 0);
			if (*mcpu_mwait == MWAIT_HALTED) {
				if (cpu_idle_enter(IDLE_STATE_C1, 0,
				    check_func, (void *)mcpu_mwait) == 0) {
					if (*mcpu_mwait == MWAIT_HALTED) {
						i86_mwait(0, 0);
					}
					cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
				}
			}
		} else {
			if (cpu_idle_enter(cs_type, 0, check_func, NULL) == 0) {
				mach_cpu_idle();
				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
			}
		}

		/*
		 * We're no longer halted
		 */
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * Tell the cpu idle framework we're going to try idling.
	 *
	 * If cpu_idle_enter returns nonzero, we've found out at the last minute
	 * that we don't actually want to idle.
	 */
	boolean_t idle_ok = cpu_idle_enter(cs_type, 0, check_func,
	    (void *)mcpu_mwait) == 0;

	if (idle_ok) {
		if (type == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (mcpu_mwait != NULL) {
				/*
				 * We're on our way to being halted.
				 * To avoid a lost wakeup, arm the monitor
				 * before checking if another cpu wrote to
				 * mcpu_mwait to wake us up.
				 */
				i86_monitor(mcpu_mwait, 0, 0);
				if (*mcpu_mwait == mwait_idle_state) {
					i86_mwait(cstate->cs_address, 1);
				}
			} else {
				mach_cpu_idle();
			}
		} else if (type == ACPI_ADR_SPACE_SYSTEM_IO) {
			/*
			 * mcpu_mwait is not directly part of idling or wakeup
			 * in the ACPI System I/O case, but if available it can
			 * hint that we shouldn't actually try to idle because
			 * we're about to be woken up anyway.
			 *
			 * A trip through idle/wakeup can be upwards of a few
			 * microseconds, so avoiding that makes this a helpful
			 * optimization, but consulting mcpu_mwait is still not
			 * necessary for correctness here.
			 */
			if (!mcpu_mwait || *mcpu_mwait == mwait_idle_state) {
				acpi_io_idle(cstate->cs_address);
			}
		}

		/*
		 * We've either idled and woken up, or decided not to idle.
		 * Either way, tell the cpu idle framework that we're not trying
		 * to idle anymore.
		 */
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	}

	/*
	 * The LAPIC timer may have stopped in deep c-state.
	 * Reprogram this CPU's LAPIC here before enabling interrupts.
	 */
	(void) cstate_use_timer(&lapic_expire, CSTATE_USING_LAT);
	sti();

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}

/*
 * Idle the present CPU, deep c-state is supported
 */
void
cpu_acpi_idle(void)
{
	cpu_t *cp = CPU;
	cpu_acpi_handle_t handle;
	cma_c_state_t *cs_data;
	cpu_acpi_cstate_t *cstates;
	hrtime_t start, end;
	int cpu_max_cstates;
	uint32_t cs_indx;
	uint16_t cs_type;

	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	handle = mach_state->ms_acpi_handle;
	ASSERT(CPU_ACPI_CSTATES(handle) != NULL);

	cs_data = mach_state->ms_cstate.cma_state.cstate;
	cstates = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	ASSERT(cstates != NULL);
	cpu_max_cstates = cpu_acpi_get_max_cstates(handle);
	if (cpu_max_cstates > CPU_MAX_CSTATES)
		cpu_max_cstates = CPU_MAX_CSTATES;
	if (cpu_max_cstates == 1) {	/* no ACPI c-state data */
		(*non_deep_idle_cpu)();
		return;
	}

	start = gethrtime_unscaled();

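	/*
	 * Ask the c-state governor which of the cached ACPI C-states to
	 * target, based on this CPU's recent idle and wakeup history.
	 */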
	cs_indx = cpupm_next_cstate(cs_data, cstates, cpu_max_cstates, start);

	cs_type = cstates[cs_indx].cs_type;

	switch (cs_type) {
	default:
		/* FALLTHROUGH */
	case CPU_ACPI_C1:
		(*non_deep_idle_cpu)();
		break;

	case CPU_ACPI_C2:
		acpi_cpu_cstate(&cstates[cs_indx]);
		break;

	case CPU_ACPI_C3:
		/*
		 * All supported Intel processors maintain cache coherency
		 * during C3. Currently when entering C3 processors flush
		 * core caches to higher level shared cache. The shared cache
		 * maintains state and supports probes during C3.
		 * Consequently there is no need to handle cache coherency
		 * and Bus Master activity here with the cache flush, BM_RLD
		 * bit, BM_STS bit, nor PM2_CNT.ARB_DIS mechanisms described
		 * in section 8.1.4 of the ACPI Specification 4.0.
		 */
		acpi_cpu_cstate(&cstates[cs_indx]);
		break;
	}

	end = gethrtime_unscaled();

	/*
	 * Update statistics
	 */
	cpupm_wakeup_cstate_data(cs_data, end);
}

boolean_t
cpu_deep_cstates_supported(void)
{
	extern int idle_cpu_no_deep_c;

	if (idle_cpu_no_deep_c)
		return (B_FALSE);

	if (!cpuid_deep_cstates_supported())
		return (B_FALSE);

	if (cpuid_arat_supported()) {
		cpu_cstate_arat = B_TRUE;
		return (B_TRUE);
	}

	/*
	 * In theory we can use the HPET as a proxy timer in case we can't rely
	 * on the LAPIC in deep C-states. In practice on AMD it seems something
	 * isn't quite right and we just don't get woken up, so the proxy timer
	 * approach doesn't work. Only set up the HPET as proxy timer on Intel
	 * systems for now.
	 */
	if (cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
	    (hpet.supported == HPET_FULL_SUPPORT) &&
	    hpet.install_proxy()) {
		cpu_cstate_hpet = B_TRUE;
		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Validate that this processor supports deep cstate and if so,
 * get the c-state data from ACPI and cache it.
 */
static int
cpu_idle_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	char name[KSTAT_STRLEN];
	int cpu_max_cstates, i;
	int ret;

	/*
	 * Cache the C-state specific ACPI data.
	 */
	if ((ret = cpu_acpi_cache_cstate_data(handle)) != 0) {
		if (ret < 0)
			cmn_err(CE_NOTE,
			    "!Support for CPU deep idle states is being "
			    "disabled due to errors parsing ACPI C-state "
			    "objects exported by BIOS.");
		cpu_idle_fini(cp);
		return (-1);
	}

	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);

	cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

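	/*
	 * Create one kstat per advertised C-state, named "c<type>" (c1, c2,
	 * ...) under module "cstate", instance <cpu id>, all backed by the
	 * shared cpu_idle_kstat template and update routine.
	 */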
	for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
		(void) snprintf(name, KSTAT_STRLEN - 1, "c%d", cstate->cs_type);
		/*
		 * Allocate, initialize and install cstate kstat
		 */
		cstate->cs_ksp = kstat_create("cstate", cp->cpu_id,
		    name, "misc",
		    KSTAT_TYPE_NAMED,
		    sizeof (cpu_idle_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);

		if (cstate->cs_ksp == NULL) {
			cmn_err(CE_NOTE, "kstat_create(c_state) fail");
		} else {
			cstate->cs_ksp->ks_data = &cpu_idle_kstat;
			cstate->cs_ksp->ks_lock = &cpu_idle_mutex;
			cstate->cs_ksp->ks_update = cpu_idle_kstat_update;
			cstate->cs_ksp->ks_data_size += MAXNAMELEN;
			cstate->cs_ksp->ks_private = cstate;
			kstat_install(cstate->cs_ksp);
		}
		cstate++;
	}

	cpupm_alloc_domains(cp, CPUPM_C_STATES);
	cpupm_alloc_ms_cstate(cp);

	if (cpu_deep_cstates_supported()) {
		uint32_t value;

		mutex_enter(&cpu_idle_callb_mutex);
		if (cpu_deep_idle_callb_id == (callb_id_t)0)
			cpu_deep_idle_callb_id = callb_add(&cpu_deep_idle_callb,
			    (void *)NULL, CB_CL_CPU_DEEP_IDLE, "cpu_deep_idle");
		if (cpu_idle_cpr_callb_id == (callb_id_t)0)
			cpu_idle_cpr_callb_id = callb_add(&cpu_idle_cpr_callb,
			    (void *)NULL, CB_CL_CPR_PM, "cpu_idle_cpr");
		mutex_exit(&cpu_idle_callb_mutex);


		/*
		 * All supported CPUs (Nehalem and later) will remain in C3
		 * during Bus Master activity.
		 * All CPUs set ACPI_BITREG_BUS_MASTER_RLD to 0 here if it
		 * is not already 0 before enabling Deeper C-states.
		 */
		cpu_acpi_get_register(ACPI_BITREG_BUS_MASTER_RLD, &value);
		if (value & 1)
			cpu_acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	return (0);
}

/*
 * Free resources allocated by cpu_idle_init().
 */
static void
cpu_idle_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	uint_t cpu_max_cstates, i;

	/*
	 * idle cpu points back to the generic one
	 */
	idle_cpu = cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
	disp_enq_thread = non_deep_idle_disp_enq_thread;

	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate) {
		cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

		for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
			if (cstate->cs_ksp != NULL)
				kstat_delete(cstate->cs_ksp);
			cstate++;
		}
	}

	cpupm_free_ms_cstate(cp);
	cpupm_free_domains(&cpupm_cstate_domains);
	cpu_acpi_free_cstate_data(handle);

	mutex_enter(&cpu_idle_callb_mutex);
	if (cpu_deep_idle_callb_id != (callb_id_t)0) {
		(void) callb_delete(cpu_deep_idle_callb_id);
		cpu_deep_idle_callb_id = (callb_id_t)0;
	}
	if (cpu_idle_cpr_callb_id != (callb_id_t)0) {
		(void) callb_delete(cpu_idle_cpr_callb_id);
		cpu_idle_cpr_callb_id = (callb_id_t)0;
	}
	mutex_exit(&cpu_idle_callb_mutex);
}

/*
 * This function is introduced here to solve a race condition between the
 * master and a slave CPU touching the c-state data structures. Once the
 * slave calls this idle function and thereby switches to the non-deep idle
 * function, the master can safely go on to reclaim the resources.
 */
static void
cpu_idle_stop_sync(void)
{
	/* switch to the non deep idle function */
	CPU->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
}

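/*
 * Tear down C-state support for a CPU that is being stopped: force the CPU
 * back onto the non-deep idle routine (waiting for it to get there via
 * cpu_idle_stop_sync()), then free its c-state kstats, domain linkage and
 * cached ACPI data.
 */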
static void
cpu_idle_stop(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	uint_t cpu_max_cstates, i = 0;

	mutex_enter(&cpu_idle_callb_mutex);
	if (idle_cpu == cpu_idle_adaptive) {
		/*
		 * invoke the slave to call the synchronous idle function.
		 */
		cp->cpu_m.mcpu_idle_cpu = cpu_idle_stop_sync;
		poke_cpu(cp->cpu_id);

		/*
		 * wait until the slave switches to the non-deep idle function,
		 * so that the master is safe to go on to reclaim the resource.
		 */
		while (cp->cpu_m.mcpu_idle_cpu != non_deep_idle_cpu) {
			drv_usecwait(10);
			if ((++i % CPU_IDLE_STOP_TIMEOUT) == 0)
				cmn_err(CE_NOTE, "!cpu_idle_stop: the slave"
				    " idle stop timeout");
		}
	}
	mutex_exit(&cpu_idle_callb_mutex);

	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate) {
		cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

		for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
			if (cstate->cs_ksp != NULL)
				kstat_delete(cstate->cs_ksp);
			cstate++;
		}
	}
	cpupm_free_ms_cstate(cp);
	cpupm_remove_domains(cp, CPUPM_C_STATES, &cpupm_cstate_domains);
	cpu_acpi_free_cstate_data(handle);
}

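/*
 * Callback registered under CB_CL_CPU_DEEP_IDLE; enables or disables use of
 * deep C-states (and the cstate_wakeup dispatcher hook) in response to
 * PM_ENABLE_CPU_DEEP_IDLE / PM_DISABLE_CPU_DEEP_IDLE requests.
 */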
/*ARGSUSED*/
static boolean_t
cpu_deep_idle_callb(void *arg, int code)
{
	boolean_t rslt = B_TRUE;

	mutex_enter(&cpu_idle_callb_mutex);
	switch (code) {
	case PM_DEFAULT_CPU_DEEP_IDLE:
		/*
		 * Default policy is same as enable
		 */
		/*FALLTHROUGH*/
	case PM_ENABLE_CPU_DEEP_IDLE:
		if ((cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG) == 0)
			break;

		if (cstate_timer_callback(PM_ENABLE_CPU_DEEP_IDLE)) {
			disp_enq_thread = cstate_wakeup;
			idle_cpu = cpu_idle_adaptive;
			cpu_idle_cfg_state &= ~CPU_IDLE_DEEP_CFG;
		} else {
			rslt = B_FALSE;
		}
		break;

	case PM_DISABLE_CPU_DEEP_IDLE:
		if (cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG)
			break;

		idle_cpu = non_deep_idle_cpu;
		if (cstate_timer_callback(PM_DISABLE_CPU_DEEP_IDLE)) {
			disp_enq_thread = non_deep_idle_disp_enq_thread;
			cpu_idle_cfg_state |= CPU_IDLE_DEEP_CFG;
		}
		break;

	default:
		cmn_err(CE_NOTE, "!cpu deep_idle_callb: invalid code %d\n",
		    code);
		break;
	}
	mutex_exit(&cpu_idle_callb_mutex);
	return (rslt);
}

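/*
 * Callback registered under CB_CL_CPR_PM; falls back to the non-deep idle
 * loop across a checkpoint (suspend) and restores deep C-state idling on
 * resume, unless the user has disabled it.
 */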
/*ARGSUSED*/
static boolean_t
cpu_idle_cpr_callb(void *arg, int code)
{
	boolean_t rslt = B_TRUE;

	mutex_enter(&cpu_idle_callb_mutex);
	switch (code) {
	case CB_CODE_CPR_RESUME:
		if (cstate_timer_callback(CB_CODE_CPR_RESUME)) {
			/*
			 * Do not enable dispatcher hooks if disabled by user.
			 */
			if (cpu_idle_cfg_state & CPU_IDLE_DEEP_CFG)
				break;

			disp_enq_thread = cstate_wakeup;
			idle_cpu = cpu_idle_adaptive;
		} else {
			rslt = B_FALSE;
		}
		break;

	case CB_CODE_CPR_CHKPT:
		idle_cpu = non_deep_idle_cpu;
		disp_enq_thread = non_deep_idle_disp_enq_thread;
		(void) cstate_timer_callback(CB_CODE_CPR_CHKPT);
		break;

	default:
		cmn_err(CE_NOTE, "!cpudvr cpr_callb: invalid code %d\n", code);
		break;
	}
	mutex_exit(&cpu_idle_callb_mutex);
	return (rslt);
}

/*
 * handle _CST notification
 */
void
cpuidle_cstate_instance(cpu_t *cp)
{
#ifndef __xpv
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle;
	struct machcpu *mcpu;
	cpuset_t dom_cpu_set;
	kmutex_t *pm_lock;
	int result = 0;
	processorid_t cpu_id;

	if (mach_state == NULL) {
		return;
	}

	ASSERT(mach_state->ms_cstate.cma_domain != NULL);
	dom_cpu_set = mach_state->ms_cstate.cma_domain->pm_cpus;
	pm_lock = &mach_state->ms_cstate.cma_domain->pm_lock;

	/*
	 * Do this for all the CPUs in the domain
	 */
	mutex_enter(pm_lock);
	do {
		CPUSET_FIND(dom_cpu_set, cpu_id);
		if (cpu_id == CPUSET_NOTINSET)
			break;

		ASSERT(cpu_id >= 0 && cpu_id < NCPU);
		cp = cpu[cpu_id];
		mach_state = (cpupm_mach_state_t *)
		    cp->cpu_m.mcpu_pm_mach_state;
		if (!(mach_state->ms_caps & CPUPM_C_STATES)) {
			mutex_exit(pm_lock);
			return;
		}
		handle = mach_state->ms_acpi_handle;
		ASSERT(handle != NULL);

		/*
		 * re-evaluate cstate object
		 */
		if (cpu_acpi_cache_cstate_data(handle) != 0) {
			cmn_err(CE_WARN, "Cannot re-evaluate the cpu c-state"
			    " object Instance: %d", cpu_id);
		}
		mcpu = &(cp->cpu_m);
		mcpu->max_cstates = cpu_acpi_get_max_cstates(handle);
		if (mcpu->max_cstates > CPU_ACPI_C1) {
			(void) cstate_timer_callback(
			    CST_EVENT_MULTIPLE_CSTATES);
			disp_enq_thread = cstate_wakeup;
			cp->cpu_m.mcpu_idle_cpu = cpu_acpi_idle;
		} else if (mcpu->max_cstates == CPU_ACPI_C1) {
			disp_enq_thread = non_deep_idle_disp_enq_thread;
			cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
			(void) cstate_timer_callback(CST_EVENT_ONE_CSTATE);
		}

		CPUSET_ATOMIC_XDEL(dom_cpu_set, cpu_id, result);
	} while (result < 0);
	mutex_exit(pm_lock);
#endif
}

/*
 * Handle a change in the number or type of available processor power states.
 */
void
cpuidle_manage_cstates(void *ctx)
{
	cpu_t *cp = ctx;
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	boolean_t is_ready;

	if (mach_state == NULL) {
		return;
	}

	/*
	 * We currently refuse to power manage if the CPU is not ready to
	 * take cross calls (cross calls fail silently if CPU is not ready
	 * for it).
	 *
	 * Additionally, for x86 platforms we cannot power manage an instance
	 * until it has been initialized.
	 */
	is_ready = (cp->cpu_flags & CPU_READY) && cpupm_cstate_ready(cp);
	if (!is_ready)
		return;

	cpuidle_cstate_instance(cp);
}