/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.72 2008/04/12 12:06:00 rpaulo Exp $
 */

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/globaldata.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/thread2.h>
#include <sys/serialize.h>
#include <sys/msgport2.h>

#include <bus/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <sys/rman.h>

#include <net/netisr2.h>
#include <net/netmsg2.h>
#include <net/if_var.h>

#include "acpi.h"
#include "acpivar.h"
#include "acpi_cpu.h"

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")
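
/*
 * Cross-CPU dispatch message.  Runtime _CST re-probing and cx_lowest
 * updates are sent to the target CPU's netisr message port (see
 * acpi_cpu_cx_cst_dispatch() and acpi_cpu_set_cx_lowest()) so that they
 * execute on the CPU whose Cx state is being changed.
 */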
struct netmsg_acpi_cst {
    struct netmsg_base	base;
    struct acpi_cpu_softc *sc;
    int			val;
};

struct acpi_cx {
    struct resource	*p_lvlx;	/* Register to read to enter state. */
    int			 rid;		/* rid of p_lvlx */
    uint32_t		 type;		/* C1-3 (C4 and up treated as C3). */
    uint32_t		 trans_lat;	/* Transition latency (usec). */
    uint32_t		 power;		/* Power consumed (mW). */
    int			 res_type;	/* Resource type for p_lvlx. */
};
#define MAX_CX_STATES	 8

struct acpi_cpu_softc {
    device_t		 cpu_dev;
    struct acpi_cpux_softc *cpu_parent;
    ACPI_HANDLE		 cpu_handle;
    int			 cpu_id;
    uint32_t		 cst_flags;	/* ACPI_CST_FLAG_ */
    uint32_t		 cpu_p_blk;	/* ACPI P_BLK location */
    uint32_t		 cpu_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cx	 cpu_cx_states[MAX_CX_STATES];
    int			 cpu_cx_count;	/* Number of valid Cx states. */
    int			 cpu_prev_sleep;/* Last idle sleep duration. */
    /* Runtime state. */
    int			 cpu_non_c3;	/* Index of lowest non-C3 state. */
    u_long		 cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */
    /* Values for sysctl. */
    int			 cpu_cx_lowest;	/* Current Cx lowest */
    int			 cpu_cx_lowest_req; /* Requested Cx lowest */
    char		 cpu_cx_supported[64];
};

#define ACPI_CST_FLAG_PROBING	0x1

struct acpi_cpu_device {
    struct resource_list	ad_rl;
};

#define CPU_GET_REG(reg, width)						\
    (bus_space_read_ ## width(rman_get_bustag((reg)),			\
			      rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)					\
    (bus_space_write_ ## width(rman_get_bustag((reg)),			\
			       rman_get_bushandle((reg)), 0, (val)))
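
/*
 * The ACPI power-management timer ticks at 3.579545 MHz, i.e. roughly 3.58
 * ticks per microsecond.  PM_USEC() below approximates ticks-to-usec with a
 * divide by 4, which is cheap but slightly underestimates the sleep time;
 * e.g. a 3580-tick delta (about 1000 usec of wall time) is accounted as
 * 3580 >> 2 = 895 usec.
 */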
#define PM_USEC(x)	 ((x) >> 2)	/* ~4 clocks per usec (3.57955 Mhz) */

#define ACPI_NOTIFY_CX_STATES	0x81	/* _CST changed. */

#define CPU_QUIRK_NO_C3		(1<<0)	/* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL	(1<<2)	/* No bus mastering control. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)

/* Platform hardware resource information. */
static uint32_t		 cpu_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t		 cpu_cst_cnt;	/* Indicate we are _CST aware. */
static int		 cpu_quirks;	/* Indicate any hardware bugs. */

/* Runtime state. */
static int		 cpu_disable_idle; /* Disable entry to idle function */
static int		 cpu_cx_count;	/* Number of valid Cx states */

/* Values for sysctl. */
static int		 cpu_cx_generic;
static int		 cpu_cx_lowest;	/* Current Cx lowest */
static int		 cpu_cx_lowest_req; /* Requested Cx lowest */
static struct lwkt_serialize cpu_cx_slize = LWKT_SERIALIZE_INITIALIZER;

/* C3 state transition */
static int		 cpu_c3_ncpus;

static device_t		*cpu_devices;
static int		 cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;

static int	acpi_cpu_cst_probe(device_t dev);
static int	acpi_cpu_cst_attach(device_t dev);
static int	acpi_cpu_cst_suspend(device_t dev);
static int	acpi_cpu_cst_resume(device_t dev);
static struct resource_list *acpi_cpu_cst_get_rlist(device_t dev,
		    device_t child);
static device_t	acpi_cpu_cst_add_child(device_t bus, device_t parent,
		    int order, const char *name, int unit);
static int	acpi_cpu_cst_read_ivar(device_t dev, device_t child,
		    int index, uintptr_t *result);
static int	acpi_cpu_cst_shutdown(device_t dev);
static void	acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void	acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst_dispatch(struct acpi_cpu_softc *sc);
static void	acpi_cpu_startup(void *arg);
static void	acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void	acpi_cpu_cx_list(struct acpi_cpu_softc *sc);
static void	acpi_cpu_idle(void);
static void	acpi_cpu_cst_notify(device_t);
static int	acpi_cpu_quirks(void);
static int	acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *, int);
static int	acpi_cpu_set_cx_lowest_oncpu(struct acpi_cpu_softc *, int);
static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static void	acpi_cpu_cx_non_c3(struct acpi_cpu_softc *sc);
static void	acpi_cpu_global_cx_count(void);

static void	acpi_cpu_c1(void);	/* XXX */

static device_method_t acpi_cpu_cst_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_cst_probe),
    DEVMETHOD(device_attach,	acpi_cpu_cst_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cpu_cst_shutdown),
    DEVMETHOD(device_suspend,	acpi_cpu_cst_suspend),
    DEVMETHOD(device_resume,	acpi_cpu_cst_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,	acpi_cpu_cst_add_child),
    DEVMETHOD(bus_read_ivar,	acpi_cpu_cst_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_cst_get_rlist),
    DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
    DEVMETHOD_END
};

static driver_t acpi_cpu_cst_driver = {
    "cpu_cst",
    acpi_cpu_cst_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_cst_devclass;
DRIVER_MODULE(cpu_cst, cpu, acpi_cpu_cst_driver, acpi_cpu_cst_devclass, NULL, NULL);
MODULE_DEPEND(cpu_cst, acpi, 1, 1, 1);

static int
acpi_cpu_cst_probe(device_t dev)
{
    int cpu_id;

    if (acpi_disabled("cpu_cst") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
	return (ENXIO);

    cpu_id = acpi_get_magic(dev);

    if (cpu_softc == NULL)
	cpu_softc = kmalloc(sizeof(struct acpi_cpu_softc *) *
	    SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL) {
	device_printf(dev, "CPU%d cstate already exists\n", cpu_id);
	return (ENXIO);
    }

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    device_set_desc(dev, "ACPI CPU C-State");

    return (0);
}
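
/*
 * The Processor object evaluated in acpi_cpu_cst_attach() below is the ASL
 * declaration for this CPU; it typically looks something like
 *
 *	Processor (\_PR.CPU0, 0x01, 0x00000410, 0x06) {}
 *
 * where the last two arguments are the P_BLK address and length that end up
 * in cpu_p_blk/cpu_p_blk_len.  (Illustrative values only.)
 */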
static int
acpi_cpu_cst_attach(device_t dev)
{
    ACPI_BUFFER		   buf;
    ACPI_OBJECT		   *obj;
    struct acpi_cpu_softc *sc;
    ACPI_STATUS		   status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_parent = device_get_softc(device_get_parent(dev));
    sc->cpu_handle = acpi_get_handle(dev);
    sc->cpu_id = acpi_get_magic(dev);
    cpu_softc[sc->cpu_id] = sc;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "attach failed to get Processor obj - %s\n",
		      AcpiFormatException(status));
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using generic Cx mode by default */
	cpu_cx_generic = FALSE;

	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    /* Finally, call identify and probe/attach for child devices. */
    bus_generic_probe(dev);
    bus_generic_attach(dev);

    return (0);
}

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cpu_cst_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    cpu_disable_idle = TRUE;
    return (0);
}

static int
acpi_cpu_cst_resume(device_t dev)
{

    cpu_disable_idle = FALSE;
    return (bus_generic_resume(dev));
}

static struct resource_list *
acpi_cpu_cst_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
	return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_cst_add_child(device_t bus, device_t parent, int order,
    const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = kmalloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
	return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(parent, order, name, unit);
    if (child != NULL)
	device_set_ivars(child, ad);
    else
	kfree(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_cst_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
	*result = (uintptr_t)sc->cpu_handle;
	break;
#if 0
    case CPU_IVAR_PCPU:
	*result = (uintptr_t)sc->cpu_pcpu;
	break;
#endif
    default:
	return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_cst_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.  There is a small race where
     * an idle thread may have passed this check but not yet gone to sleep.
     * This is ok since device_shutdown() does not free the softc, otherwise
     * we'd have to be sure all threads were evicted before returning.
     */
    cpu_disable_idle = TRUE;

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;
    sc->cpu_cx_lowest_req = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * will be handled by acpi_cpu_startup.  We need to defer probing for
     * generic Cx states until after all cpus in the system have been
     * probed, as we may already have found cpus with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it. Switch back to generic mode.
	 */
	cpu_cx_generic = TRUE;
	if (bootverbose)
	    device_printf(sc->cpu_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS	 gas;
    struct acpi_cx		*cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a partial set of features, so we treat a length
     * of 5 as providing C2.  Some may also have a value of 7 to indicate
     * another C3, but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
	return;
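
    /*
     * The FADT latency fields double as capability flags: per the ACPI
     * specification a C2 latency above 100 usec, or a C3 latency above
     * 1000 usec, means the corresponding state is not usable via P_LVLx,
     * which is why the checks below use those cut-offs.
     */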
    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
	gas.Address = sc->cpu_p_blk + 4;

	cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->type, &cx_ptr->rid, &gas,
			   &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cpu_parent->cpux_next_rid++;
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	    sc->cpu_non_c3 = 1;
	}
    }
    if (sc->cpu_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
	gas.Address = sc->cpu_p_blk + 5;

	cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->type, &cx_ptr->rid, &gas,
			   &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cpu_parent->cpux_next_rid++;
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
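/*
 * For reference, a _CST evaluation returns a package of the form
 *
 *	Package {
 *	    Count,
 *	    Package { Register (P_LVLx I/O or FFH), Type, Latency, Power },
 *	    ...
 *	}
 *
 * which is what the element-by-element unpacking below expects.
 */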
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx	*cx_ptr;
    ACPI_STATUS		 status;
    ACPI_BUFFER		 buf;
    ACPI_OBJECT		*top;
    ACPI_OBJECT		*pkg;
    uint32_t		 count;
    int			 i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cpu_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n",
		      count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    sc->cst_flags |= ACPI_CST_FLAG_PROBING;
    cpu_sfence();

    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
	    device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    sc->cpu_non_c3 = i;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	    continue;
	case ACPI_STATE_C2:
	    sc->cpu_non_c3 = i;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "acpi_cpu%d: C3[%d] not available.\n",
				 device_get_unit(sc->cpu_dev), i));
		continue;
	    }
	    break;
	}

#ifdef notyet
	/* Free up any previous register. */
	if (cx_ptr->p_lvlx != NULL) {
	    bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
	    cx_ptr->p_lvlx = NULL;
	}
#endif

	/* Allocate the control register for C2 or C3. */
	cx_ptr->rid = sc->cpu_parent->cpux_next_rid;
	acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->rid,
		    &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx) {
	    sc->cpu_parent->cpux_next_rid++;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			     "acpi_cpu%d: Got C%d - %d latency\n",
			     device_get_unit(sc->cpu_dev), cx_ptr->type,
			     cx_ptr->trans_lat));
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
    AcpiOsFree(buf.Pointer);

    /*
     * Fix up the lowest Cx being used.
     */
    if (sc->cpu_cx_lowest_req < sc->cpu_cx_count)
	sc->cpu_cx_lowest = sc->cpu_cx_lowest_req;
    if (sc->cpu_cx_lowest > sc->cpu_cx_count - 1)
	sc->cpu_cx_lowest = sc->cpu_cx_count - 1;

    /*
     * Cache the lowest non-C3 state.
     * NOTE: this must be done after cpu_cx_lowest is set.
     */
    acpi_cpu_cx_non_c3(sc);

    cpu_sfence();
    sc->cst_flags &= ~ACPI_CST_FLAG_PROBING;

    return (0);
}
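
/*
 * acpi_cpu_cx_cst() rewrites the per-CPU state array in place, so it
 * brackets the update with ACPI_CST_FLAG_PROBING and cpu_sfence();
 * acpi_cpu_idle() checks the flag and falls back to C1 while the array is
 * being rewritten.  The handler below lets the notify path run the
 * re-probe on the CPU that owns the softc.
 */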
static void
acpi_cst_probe_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
    int error;

    error = acpi_cpu_cx_cst(rmsg->sc);
    lwkt_replymsg(&rmsg->base.lmsg, error);
}

static int
acpi_cpu_cx_cst_dispatch(struct acpi_cpu_softc *sc)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
		acpi_cst_probe_handler);
    msg.sc = sc;

    return lwkt_domsg(netisr_cpuport(sc->cpu_id), &msg.base.lmsg, 0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_cst_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
	/*
	 * We are using generic Cx mode, probe for available Cx states
	 * for all processors.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    acpi_cpu_generic_cx_probe(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 state if necessary.
	 *
	 * As we now know for sure that we will be using _CST mode,
	 * install our notify handler.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    if (cpu_quirks & CPU_QUIRK_NO_C3)
		sc->cpu_cx_count = sc->cpu_non_c3 + 1;
	    sc->cpu_parent->cpux_cst_notify = acpi_cpu_cst_notify;
	}
    }
    acpi_cpu_global_cx_count();

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	acpi_cpu_startup_cx(sc);

	if (sc->cpu_parent->glob_sysctl_tree != NULL) {
	    struct acpi_cpux_softc *cpux = sc->cpu_parent;

	    /* Add a sysctl handler to handle global Cx lowest setting */
	    SYSCTL_ADD_PROC(&cpux->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpux->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest",
			    CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
			    acpi_cpu_global_cx_lowest_sysctl, "A",
			    "Requested global lowest Cx sleep state");
	    SYSCTL_ADD_PROC(&cpux->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpux->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest_use",
			    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
			    acpi_cpu_global_cx_lowest_use_sysctl, "A",
			    "Global lowest Cx sleep state to use");
	}
    }
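
    /*
     * Note that "cx_lowest" is the requested state (remembered in
     * cpu_cx_lowest_req), while "cx_lowest_use" reports the state actually
     * in effect after clamping to the number of Cx states currently
     * available; the two can differ after a _CST change.
     */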
    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest = 0;
    cpu_cx_lowest_req = 0;
    cpu_disable_idle = FALSE;
    cpu_idle_hook = acpi_cpu_idle;
}

static void
acpi_cpu_cx_list(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
	     SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cpu_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    struct acpi_cpux_softc *cpux = sc->cpu_parent;

    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&cpux->pcpu_sysctl_ctx,
		      SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
		      sc->cpu_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
		    (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
		    "requested lowest Cx sleep state");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest_use", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_cx_lowest_use_sysctl, "A",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
		    "percent usage for each Cx state");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK(acpi);
    }
#endif
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(void)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint64_t	start_time, end_time;
    int		bm_active, cx_next_idx, i;

    /* If disabled, return immediately. */
    if (cpu_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* Still probing; use C1 */
    if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
	acpi_cpu_c1();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = min(cx_next_idx, sc->cpu_non_c3);
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));
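
    /*
     * Note: cpu_prev_sleep below is an exponential moving average of
     * recent sleep lengths, new = (3 * old + sample) / 4, so the
     * "trans_lat * 3" test above only selects a deeper state once recent
     * idle periods have averaged at least three times its transition
     * latency.
     */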
    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept for half of a quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + 500000 / hz) / 4;
	acpi_cpu_c1();
	return;
    }

    /*
     * For C3(+), disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type >= ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
	} else
	    ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    AcpiRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type >= ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}
    }
    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    end_time = acpi_TimerDelta(end_time, start_time);
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_cst_notify(device_t dev)
{
    struct acpi_cpu_softc *sc = device_get_softc(dev);

    KASSERT(curthread->td_type != TD_TYPE_NETISR,
	("notify in netisr%d", mycpuid));

    lwkt_serialize_enter(&cpu_cx_slize);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst_dispatch(sc);
    acpi_cpu_cx_list(sc);

    /* Update the new lowest usable Cx state for all CPUs. */
    acpi_cpu_global_cx_count();

    /*
     * Fix up the lowest Cx being used.
     */
    if (cpu_cx_lowest_req < cpu_cx_count)
	cpu_cx_lowest = cpu_cx_lowest_req;
    if (cpu_cx_lowest > cpu_cx_count - 1)
	cpu_cx_lowest = cpu_cx_count - 1;

    lwkt_serialize_exit(&cpu_cx_slize);
}

static int
acpi_cpu_quirks(void)
{
    device_t acpi_dev;
    uint32_t val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	AcpiGbl_FADT.Pm2ControlLength == 0) {
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
	    cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, using flush cache method\n"));
	} else {
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, C3 not available\n"));
	}
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && ncpus > 1) {
	cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
	    "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 *
	 * Also, make sure that all interrupts cause a "Stop Break"
	 * event to exit from C2 state.
	 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
	 * should be set to zero, otherwise it causes C2 to short-sleep.
	 * PIIX4 doesn't properly support C3 and bus master activity
	 * need not break out of C2.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: working around PIIX4 bug, disabling C3\n"));

	    val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
	    if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
		val |= PIIX4_STOP_BREAK_MASK;
		pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
	    }
	    AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
	    if (val) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
		AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	    }
	    break;
	default:
	    break;
	}
    }

    return (0);
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf	 sb;
    char	 buf[128];
    int		 i;
    uintmax_t	 fract, sum, whole;

    sc = (struct acpi_cpu_softc *) arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
	sum += sc->cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++) {
	if (sum > 0) {
	    whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
	    fract = (whole % sum) * 100;
	    sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
			(u_int)(fract / sum));
	} else
	    sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}
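
/*
 * Apply a new lowest Cx state on the CPU that owns it.  Entering C3(+) can
 * stop the per-CPU interrupt timer on many parts, so the first CPU allowed
 * into C3(+) switches the interrupt cputimer to one with
 * CPUTIMER_INTR_CAP_PS, and the last CPU leaving C3(+) lifts that
 * requirement again; cpu_c3_ncpus tracks how many CPUs currently allow
 * C3(+).
 */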
1060c241507cSSepherosa Ziehau     KKASSERT(mycpuid == sc->cpu_id);
1061c241507cSSepherosa Ziehau 
10621d730338SSepherosa Ziehau     sc->cpu_cx_lowest_req = val;
10631d730338SSepherosa Ziehau     if (val > sc->cpu_cx_count - 1)
10641d730338SSepherosa Ziehau         val = sc->cpu_cx_count - 1;
10655db2f26eSSascha Wildner     old_lowest = atomic_swap_int(&sc->cpu_cx_lowest, val);
10665db2f26eSSascha Wildner 
10675db2f26eSSascha Wildner     old_type = sc->cpu_cx_states[old_lowest].type;
10685db2f26eSSascha Wildner     type = sc->cpu_cx_states[val].type;
1069b42fff25SSepherosa Ziehau     if (old_type >= ACPI_STATE_C3 && type < ACPI_STATE_C3) {
10705db2f26eSSascha Wildner         KKASSERT(cpu_c3_ncpus > 0);
10715db2f26eSSascha Wildner         if (atomic_fetchadd_int(&cpu_c3_ncpus, -1) == 1) {
10725db2f26eSSascha Wildner             /*
10735db2f26eSSascha Wildner              * All of the CPUs have exited C3 state; a better
10745db2f26eSSascha Wildner              * one-shot timer can be used.
10755db2f26eSSascha Wildner              */
10765db2f26eSSascha Wildner             error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_NONE);
10773b24650bSSepherosa Ziehau             KKASSERT(!error || error == ERESTART);
10783b24650bSSepherosa Ziehau             if (error == ERESTART) {
10793b24650bSSepherosa Ziehau                 if (bootverbose)
10803b24650bSSepherosa Ziehau                     kprintf("exit C3, restart intr cputimer\n");
10815db2f26eSSascha Wildner                 cputimer_intr_restart();
10825db2f26eSSascha Wildner             }
10833b24650bSSepherosa Ziehau         }
1084b42fff25SSepherosa Ziehau     } else if (type >= ACPI_STATE_C3 && old_type < ACPI_STATE_C3) {
10855db2f26eSSascha Wildner         if (atomic_fetchadd_int(&cpu_c3_ncpus, 1) == 0) {
10865db2f26eSSascha Wildner             /*
1087b42fff25SSepherosa Ziehau              * When the first CPU enters C3(+) state, switch
10885db2f26eSSascha Wildner              * to a one-shot timer which can handle C3(+)
1089b42fff25SSepherosa Ziehau              * states, i.e. the timer will not hang.
10905db2f26eSSascha Wildner              */
10915db2f26eSSascha Wildner             error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_PS);
10923b24650bSSepherosa Ziehau             if (error == ERESTART) {
10933b24650bSSepherosa Ziehau                 if (bootverbose)
10943b24650bSSepherosa Ziehau                     kprintf("enter C3, restart intr cputimer\n");
10955db2f26eSSascha Wildner                 cputimer_intr_restart();
10963b24650bSSepherosa Ziehau             } else if (error) {
10975db2f26eSSascha Wildner                 kprintf("no suitable intr cputimer found\n");
10985db2f26eSSascha Wildner 
10995db2f26eSSascha Wildner                 /* Restore */
11005db2f26eSSascha Wildner                 sc->cpu_cx_lowest = old_lowest;
11015db2f26eSSascha Wildner                 atomic_fetchadd_int(&cpu_c3_ncpus, -1);
11025db2f26eSSascha Wildner             }
11035db2f26eSSascha Wildner         }
11045db2f26eSSascha Wildner     }
11055db2f26eSSascha Wildner 
11065db2f26eSSascha Wildner     if (error)
11075db2f26eSSascha Wildner         return error;
11085db2f26eSSascha Wildner 
1109febc8c49SSepherosa Ziehau     /* Cache the new lowest non-C3 state. */
1110febc8c49SSepherosa Ziehau     acpi_cpu_cx_non_c3(sc);
11115db2f26eSSascha Wildner 
11125db2f26eSSascha Wildner     /* Reset the statistics counters. */
11135db2f26eSSascha Wildner     bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
11145db2f26eSSascha Wildner     return (0);
11155db2f26eSSascha Wildner }
11165db2f26eSSascha Wildner 
1117c241507cSSepherosa Ziehau static void
1118c241507cSSepherosa Ziehau acpi_cst_set_lowest_handler(netmsg_t msg)
1119c241507cSSepherosa Ziehau {
1120c241507cSSepherosa Ziehau     struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
1121c241507cSSepherosa Ziehau     int error;
1122c241507cSSepherosa Ziehau 
1123c241507cSSepherosa Ziehau     error = acpi_cpu_set_cx_lowest_oncpu(rmsg->sc, rmsg->val);
1124c241507cSSepherosa Ziehau     lwkt_replymsg(&rmsg->base.lmsg, error);
1125c241507cSSepherosa Ziehau }
1126c241507cSSepherosa Ziehau 
1127c241507cSSepherosa Ziehau static int
1128c241507cSSepherosa Ziehau acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc, int val)
1129c241507cSSepherosa Ziehau {
1130c241507cSSepherosa Ziehau     struct netmsg_acpi_cst msg;
1131c241507cSSepherosa Ziehau 
1132c241507cSSepherosa Ziehau     netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
1133c241507cSSepherosa Ziehau         acpi_cst_set_lowest_handler);
1134c241507cSSepherosa Ziehau     msg.sc = sc;
1135c241507cSSepherosa Ziehau     msg.val = val;
1136c241507cSSepherosa Ziehau 
1137c241507cSSepherosa Ziehau     return lwkt_domsg(netisr_cpuport(sc->cpu_id), &msg.base.lmsg, 0);
1138c241507cSSepherosa Ziehau }
1139c241507cSSepherosa Ziehau 
11405db2f26eSSascha Wildner static int
11415db2f26eSSascha Wildner acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
11425db2f26eSSascha Wildner {
11435db2f26eSSascha Wildner     struct acpi_cpu_softc *sc;
11445db2f26eSSascha Wildner     char state[8];
11455db2f26eSSascha Wildner     int val, error;
11465db2f26eSSascha Wildner 
11475db2f26eSSascha Wildner     sc = (struct acpi_cpu_softc *)arg1;
11481d730338SSepherosa Ziehau     ksnprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_req + 1);
11495db2f26eSSascha Wildner     error = sysctl_handle_string(oidp, state, sizeof(state), req);
11505db2f26eSSascha Wildner     if (error != 0 || req->newptr == NULL)
11515db2f26eSSascha Wildner         return (error);
11525db2f26eSSascha Wildner     if (strlen(state) < 2 || toupper(state[0]) != 'C')
11535db2f26eSSascha Wildner         return (EINVAL);
11545db2f26eSSascha Wildner     val = (int) strtol(state + 1, NULL, 10) - 1;
11551d730338SSepherosa Ziehau     if (val < 0)
11565db2f26eSSascha Wildner         return (EINVAL);
11575db2f26eSSascha Wildner 
1158b45624acSSepherosa Ziehau     lwkt_serialize_enter(&cpu_cx_slize);
11595db2f26eSSascha Wildner     error = acpi_cpu_set_cx_lowest(sc, val);
1160b45624acSSepherosa Ziehau     lwkt_serialize_exit(&cpu_cx_slize);
11615db2f26eSSascha Wildner 
11625db2f26eSSascha Wildner     return error;
11635db2f26eSSascha Wildner }
11645db2f26eSSascha Wildner 
11655db2f26eSSascha Wildner static int
11661d730338SSepherosa Ziehau acpi_cpu_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
11671d730338SSepherosa Ziehau {
11681d730338SSepherosa Ziehau     struct acpi_cpu_softc *sc;
11691d730338SSepherosa Ziehau     char state[8];
11701d730338SSepherosa Ziehau 
11711d730338SSepherosa Ziehau     sc = (struct acpi_cpu_softc *)arg1;
11721d730338SSepherosa Ziehau     ksnprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest + 1);
11731d730338SSepherosa Ziehau     return sysctl_handle_string(oidp, state, sizeof(state), req);
11741d730338SSepherosa Ziehau }
11751d730338SSepherosa Ziehau 
11761d730338SSepherosa Ziehau static int
11775db2f26eSSascha Wildner acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
11785db2f26eSSascha Wildner {
11795db2f26eSSascha Wildner     struct acpi_cpu_softc *sc;
11805db2f26eSSascha Wildner     char state[8];
11815db2f26eSSascha Wildner     int val, error, i;
11825db2f26eSSascha Wildner 
11831d730338SSepherosa Ziehau     ksnprintf(state, sizeof(state), "C%d", cpu_cx_lowest_req + 1);
11845db2f26eSSascha Wildner     error = sysctl_handle_string(oidp, state, sizeof(state), req);
11855db2f26eSSascha Wildner     if (error != 0 || req->newptr == NULL)
11865db2f26eSSascha Wildner         return (error);
11875db2f26eSSascha Wildner     if (strlen(state) < 2 || toupper(state[0]) != 'C')
11885db2f26eSSascha Wildner         return (EINVAL);
11895db2f26eSSascha Wildner     val = (int) strtol(state + 1, NULL, 10) - 1;
11901d730338SSepherosa Ziehau     if (val < 0)
11915db2f26eSSascha Wildner         return (EINVAL);
11921d730338SSepherosa Ziehau 
1193b45624acSSepherosa Ziehau     lwkt_serialize_enter(&cpu_cx_slize);
1194b45624acSSepherosa Ziehau 
11951d730338SSepherosa Ziehau     cpu_cx_lowest_req = val;
11965db2f26eSSascha Wildner     cpu_cx_lowest = val;
11971d730338SSepherosa Ziehau     if (cpu_cx_lowest > cpu_cx_count - 1)
11981d730338SSepherosa Ziehau         cpu_cx_lowest = cpu_cx_count - 1;
11995db2f26eSSascha Wildner 
12005db2f26eSSascha Wildner     /* Update the new lowest usable Cx state for all CPUs. */
12015db2f26eSSascha Wildner     for (i = 0; i < cpu_ndevices; i++) {
12025db2f26eSSascha Wildner         sc = device_get_softc(cpu_devices[i]);
12035db2f26eSSascha Wildner         error = acpi_cpu_set_cx_lowest(sc, val);
12045db2f26eSSascha Wildner         if (error) {
12055db2f26eSSascha Wildner             KKASSERT(i == 0);
12065db2f26eSSascha Wildner             break;
12075db2f26eSSascha Wildner         }
12085db2f26eSSascha Wildner     }
1209b45624acSSepherosa Ziehau 
1210b45624acSSepherosa Ziehau     lwkt_serialize_exit(&cpu_cx_slize);
12115db2f26eSSascha Wildner 
12125db2f26eSSascha Wildner     return error;
12135db2f26eSSascha Wildner }
12145db2f26eSSascha Wildner 
12151d730338SSepherosa Ziehau static int
12161d730338SSepherosa Ziehau acpi_cpu_global_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
12171d730338SSepherosa Ziehau {
12181d730338SSepherosa Ziehau     char state[8];
12191d730338SSepherosa Ziehau 
12201d730338SSepherosa Ziehau     ksnprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1);
12211d730338SSepherosa Ziehau     return sysctl_handle_string(oidp, state, sizeof(state), req);
12221d730338SSepherosa Ziehau }
12231d730338SSepherosa Ziehau 
12245db2f26eSSascha Wildner /*
12255db2f26eSSascha Wildner  * Put the CPU in C1 in a machine-dependent way.
12265db2f26eSSascha Wildner  * XXX: shouldn't be here!
12275db2f26eSSascha Wildner  */
12285db2f26eSSascha Wildner static void
12295db2f26eSSascha Wildner acpi_cpu_c1(void)
12305db2f26eSSascha Wildner {
12315db2f26eSSascha Wildner #ifdef __ia64__
12325db2f26eSSascha Wildner     ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
12335db2f26eSSascha Wildner #else
12345db2f26eSSascha Wildner     splz();
12355db2f26eSSascha Wildner     if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
12365db2f26eSSascha Wildner         __asm __volatile("sti; hlt");
12375db2f26eSSascha Wildner     else
12385db2f26eSSascha Wildner         __asm __volatile("sti; pause");
12395db2f26eSSascha Wildner #endif /* !__ia64__ */
12405db2f26eSSascha Wildner }
1241febc8c49SSepherosa Ziehau 
1242febc8c49SSepherosa Ziehau static void
1243febc8c49SSepherosa Ziehau acpi_cpu_cx_non_c3(struct acpi_cpu_softc *sc)
1244febc8c49SSepherosa Ziehau {
1245febc8c49SSepherosa Ziehau     int i;
1246febc8c49SSepherosa Ziehau 
1247febc8c49SSepherosa Ziehau     sc->cpu_non_c3 = 0;
1248febc8c49SSepherosa Ziehau     for (i = sc->cpu_cx_lowest; i >= 0; i--) {
1249febc8c49SSepherosa Ziehau         if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
1250febc8c49SSepherosa Ziehau             sc->cpu_non_c3 = i;
1251febc8c49SSepherosa Ziehau             break;
1252febc8c49SSepherosa Ziehau         }
1253febc8c49SSepherosa Ziehau     }
1254febc8c49SSepherosa Ziehau     if (bootverbose)
1255febc8c49SSepherosa Ziehau         device_printf(sc->cpu_dev, "non-C3 %d\n", sc->cpu_non_c3);
1256febc8c49SSepherosa Ziehau }
12574cf48621SSepherosa Ziehau 
12584cf48621SSepherosa Ziehau /*
12594cf48621SSepherosa Ziehau  * Update the global cpu_cx_count, the largest Cx state supported by all
12604cf48621SSepherosa Ziehau  * CPUs (the minimum per-CPU Cx count).  Used by the global Cx sysctl handler.
12614cf48621SSepherosa Ziehau  */
12624cf48621SSepherosa Ziehau static void
12634cf48621SSepherosa Ziehau acpi_cpu_global_cx_count(void)
12644cf48621SSepherosa Ziehau {
12654cf48621SSepherosa Ziehau     struct acpi_cpu_softc *sc;
12664cf48621SSepherosa Ziehau     int i;
12674cf48621SSepherosa Ziehau 
12684cf48621SSepherosa Ziehau     if (cpu_ndevices == 0) {
12694cf48621SSepherosa Ziehau         cpu_cx_count = 0;
12704cf48621SSepherosa Ziehau         return;
12714cf48621SSepherosa Ziehau     }
12724cf48621SSepherosa Ziehau 
12734cf48621SSepherosa Ziehau     sc = device_get_softc(cpu_devices[0]);
12744cf48621SSepherosa Ziehau     cpu_cx_count = sc->cpu_cx_count;
12754cf48621SSepherosa Ziehau 
12764cf48621SSepherosa Ziehau     for (i = 1; i < cpu_ndevices; i++) {
12774cf48621SSepherosa Ziehau         struct acpi_cpu_softc *sc = device_get_softc(cpu_devices[i]);
12784cf48621SSepherosa Ziehau 
12794cf48621SSepherosa Ziehau         if (sc->cpu_cx_count < cpu_cx_count)
12804cf48621SSepherosa Ziehau             cpu_cx_count = sc->cpu_cx_count;
12814cf48621SSepherosa Ziehau     }
12824cf48621SSepherosa Ziehau     if (bootverbose)
12834cf48621SSepherosa Ziehau         kprintf("cpu_cst: global Cx count %d\n", cpu_cx_count);
12844cf48621SSepherosa Ziehau }
1285
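/*
 * Illustrative request flow through the handlers above (sysctl
 * registration not shown here): a write such as "C3" reaches
 * acpi_cpu_cx_lowest_sysctl() or acpi_cpu_global_cx_lowest_sysctl(),
 * which parses it into index 2 and enters the cpu_cx_slize serializer;
 * acpi_cpu_set_cx_lowest() then forwards the request to the target
 * CPU's netisr port with lwkt_domsg(); acpi_cst_set_lowest_handler()
 * runs there and calls acpi_cpu_set_cx_lowest_oncpu(), which clamps the
 * index, updates cpu_cx_lowest and the interrupt cputimer, and replies
 * with the resulting error code.
 */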