/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/acpi_cpu.c,v 1.72 2008/04/12 12:06:00 rpaulo Exp $
 */

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/globaldata.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/thread2.h>
#include <sys/serialize.h>
#include <sys/msgport2.h>
#include <sys/microtime_pcpu.h>

#include <bus/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <sys/rman.h>

#include <net/netisr2.h>
#include <net/netmsg2.h>
#include <net/if_var.h>

#include "acpi.h"
#include "acpivar.h"
#include "acpi_cpu.h"

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct netmsg_acpi_cst {
    struct netmsg_base base;
    struct acpi_cst_softc *sc;
    int		val;
};

struct acpi_cx {
    struct resource	*p_lvlx;	/* Register to read to enter state. */
    int			rid;		/* rid of p_lvlx */
    uint32_t		type;		/* C1-3 (C4 and up treated as C3). */
    uint32_t		trans_lat;	/* Transition latency (usec). */
    uint32_t		power;		/* Power consumed (mW). */
    int			res_type;	/* Resource type for p_lvlx. */
    bus_space_tag_t	btag;
    bus_space_handle_t	bhand;
};
#define MAX_CX_STATES	8

struct acpi_cst_softc {
    device_t		cst_dev;
    struct acpi_cpux_softc *cst_parent;
    ACPI_HANDLE		cst_handle;
    int			cst_cpuid;
    uint32_t		cst_flags;	/* ACPI_CST_FLAG_ */
    uint32_t		cst_p_blk;	/* ACPI P_BLK location */
    uint32_t		cst_p_blk_len;	/* P_BLK length (must be 6). */
    struct acpi_cx	cst_cx_states[MAX_CX_STATES];
    int			cst_cx_count;	/* Number of valid Cx states. */
    int			cst_prev_sleep;	/* Last idle sleep duration. */
    /* Runtime state. */
    int			cst_non_c3;	/* Index of lowest non-C3 state. */
    u_long		cst_cx_stats[MAX_CX_STATES];	/* Cx usage history. */
    /* Values for sysctl. */
    int			cst_cx_lowest;	/* Current Cx lowest */
    int			cst_cx_lowest_req; /* Requested Cx lowest */
    char		cst_cx_supported[64];
};

#define ACPI_CST_FLAG_PROBING	0x1

#define ACPI_CST_ENTER_IO(cx)	bus_space_read_1((cx)->btag, (cx)->bhand, 0)

#define CPU_QUIRK_NO_C3		(1<<0)	/* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL	(1<<2)	/* No bus mastering control. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3
#define PIIX4_DEVACTB_REG	0x58
#define PIIX4_BRLD_EN_IRQ0	(1<<0)
#define PIIX4_BRLD_EN_IRQ	(1<<1)
#define PIIX4_BRLD_EN_IRQ8	(1<<5)
#define PIIX4_STOP_BREAK_MASK	(PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8)
#define PIIX4_PCNTRL_BST_EN	(1<<10)

/* Platform hardware resource information. */
static uint32_t		cpu_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t		cpu_cst_cnt;	/* Indicate we are _CST aware. */
static int		cpu_quirks;	/* Indicate any hardware bugs. */

/* Runtime state. */
static int		cpu_disable_idle; /* Disable entry to idle function */
static int		cpu_cx_count;	/* Number of valid Cx states */

/* Values for sysctl. */
static int		cpu_cx_generic;
static int		cpu_cx_lowest;	/* Current Cx lowest */
static int		cpu_cx_lowest_req; /* Requested Cx lowest */
static struct lwkt_serialize cpu_cx_slize = LWKT_SERIALIZE_INITIALIZER;

/* C3 state transition */
static int		cpu_c3_ncpus;

static device_t		*cpu_devices;
static int		cpu_ndevices;
static struct acpi_cst_softc **cpu_softc;

static int	acpi_cst_probe(device_t dev);
static int	acpi_cst_attach(device_t dev);
static int	acpi_cst_suspend(device_t dev);
static int	acpi_cst_resume(device_t dev);
static int	acpi_cst_shutdown(device_t dev);

static void	acpi_cpu_cx_probe(struct acpi_cst_softc *sc);
static void	acpi_cpu_generic_cx_probe(struct acpi_cst_softc *sc);
static int	acpi_cpu_cx_cst(struct acpi_cst_softc *sc);
static int	acpi_cpu_cx_cst_dispatch(struct acpi_cst_softc *sc);
static void	acpi_cpu_startup(void *arg);
static void	acpi_cpu_startup_cx(struct acpi_cst_softc *sc);
static void	acpi_cpu_cx_list(struct acpi_cst_softc *sc);
static void	acpi_cpu_idle(void);
static void	acpi_cpu_cst_notify(device_t);
static int	acpi_cpu_quirks(void);
static int	acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_set_cx_lowest(struct acpi_cst_softc *, int);
static int	acpi_cpu_set_cx_lowest_oncpu(struct acpi_cst_softc *, int);
static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_global_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS);
static void	acpi_cpu_cx_non_c3(struct acpi_cst_softc *sc);
static void	acpi_cpu_global_cx_count(void);

static void	acpi_cpu_c1(void);	/* XXX */

static device_method_t acpi_cst_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cst_probe),
    DEVMETHOD(device_attach,	acpi_cst_attach),
    DEVMETHOD(device_detach,	bus_generic_detach),
    DEVMETHOD(device_shutdown,	acpi_cst_shutdown),
    DEVMETHOD(device_suspend,	acpi_cst_suspend),
    DEVMETHOD(device_resume,	acpi_cst_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child,	bus_generic_add_child),
    DEVMETHOD(bus_read_ivar,	bus_generic_read_ivar),
    DEVMETHOD(bus_get_resource_list, bus_generic_get_resource_list),
    DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
    DEVMETHOD_END
};

static driver_t acpi_cst_driver = {
    "cpu_cst",
    acpi_cst_methods,
    sizeof(struct acpi_cst_softc),
};

static devclass_t acpi_cst_devclass;
DRIVER_MODULE(cpu_cst, cpu, acpi_cst_driver, acpi_cst_devclass, NULL, NULL);
MODULE_DEPEND(cpu_cst, acpi, 1, 1, 1);

static int
acpi_cst_probe(device_t dev)
{
    int cpu_id;

    if (acpi_disabled("cpu_cst") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
	return (ENXIO);

    cpu_id = acpi_get_magic(dev);

    if (cpu_softc == NULL)
	cpu_softc = kmalloc(sizeof(struct acpi_cst_softc *) *
	    SMP_MAXCPU, M_TEMP /* XXX */, M_INTWAIT | M_ZERO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL) {
	device_printf(dev, "CPU%d cstate already exists\n", cpu_id);
	return (ENXIO);
    }

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    device_set_desc(dev, "ACPI CPU C-State");

    return (0);
}

static int
acpi_cst_attach(device_t dev)
{
    ACPI_BUFFER		buf;
    ACPI_OBJECT		*obj;
    struct acpi_cst_softc *sc;
    ACPI_STATUS		status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cst_dev = dev;
    sc->cst_parent = device_get_softc(device_get_parent(dev));
    sc->cst_handle = acpi_get_handle(dev);
    sc->cst_cpuid = acpi_get_magic(dev);
    cpu_softc[sc->cst_cpuid] = sc;
    cpu_smi_cmd = AcpiGbl_FADT.SmiCommand;
    cpu_cst_cnt = AcpiGbl_FADT.CstControl;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cst_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "attach failed to get Processor obj - %s\n",
		      AcpiFormatException(status));
	return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cst_p_blk = obj->Processor.PblkAddress;
    sc->cst_p_blk_len = obj->Processor.PblkLength;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cst_p_blk, sc->cst_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
	/* Assume we won't be using generic Cx mode by default */
	cpu_cx_generic = FALSE;

	/* Queue post cpu-probing task handler */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL);
    }

    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    /* Finally, call identify and probe/attach for child devices. */
    bus_generic_probe(dev);
    bus_generic_attach(dev);

    return (0);
}

/*
 * Disable any entry to the idle function during suspend and re-enable it
 * during resume.
 */
static int
acpi_cst_suspend(device_t dev)
{
    int error;

    error = bus_generic_suspend(dev);
    if (error)
	return (error);
    cpu_disable_idle = TRUE;
    return (0);
}

static int
acpi_cst_resume(device_t dev)
{
    cpu_disable_idle = FALSE;
    return (bus_generic_resume(dev));
}

static int
acpi_cst_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /*
     * Disable any entry to the idle function.  There is a small race where
     * an idle thread may have passed this check but not yet gone to sleep.
     * This is ok since device_shutdown() does not free the softc, otherwise
     * we'd have to be sure all threads were evicted before returning.
     */
    cpu_disable_idle = TRUE;

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cst_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cst_prev_sleep = 1000000;
    sc->cst_cx_lowest = 0;
    sc->cst_cx_lowest_req = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to the generic FADT/P_BLK Cx control method, which
     * is handled by acpi_cpu_startup.  Generic Cx probing must be deferred
     * until all CPUs in the system have been probed, since we may already
     * have found CPUs with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
	/*
	 * We were unable to find a _CST package for this cpu or there
	 * was an error parsing it.  Switch back to generic mode.
	 */
	cpu_cx_generic = TRUE;
	if (bootverbose)
	    device_printf(sc->cst_dev, "switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}
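
/*
 * Probe the FADT/P_BLK ("generic") Cx states for one CPU.  C1 is always
 * assumed to exist.  C2 and C3 are entered through the P_LVL2/P_LVL3 I/O
 * ports derived from P_BLK and are only used when the FADT-reported worst
 * case latencies are within the limits the ACPI spec deems usable
 * (C2 <= 100us, C3 <= 1000us) and, for C3, no quirk forbids it.
 */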
static void
acpi_cpu_generic_cx_probe(struct acpi_cst_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx	*cx_ptr;

    sc->cst_cx_count = 0;
    cx_ptr = sc->cst_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cst_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cst_cx_count++;

    /* C2(+) is not supported on MP system */
    if (ncpus > 1 && (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
	return;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cst_p_blk_len < 5)
	return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
	gas.Address = sc->cst_p_blk + 4;

	cx_ptr->rid = sc->cst_parent->cpux_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->type, &cx_ptr->rid, &gas,
			   &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cst_parent->cpux_next_rid++;
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->p_lvlx);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->p_lvlx);
	    cx_ptr++;
	    sc->cst_cx_count++;
	    sc->cst_non_c3 = 1;
	}
    }
    if (sc->cst_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
	gas.Address = sc->cst_p_blk + 5;

	cx_ptr->rid = sc->cst_parent->cpux_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->type, &cx_ptr->rid, &gas,
			   &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cst_parent->cpux_next_rid++;
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->p_lvlx);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->p_lvlx);
	    cx_ptr++;
	    sc->cst_cx_count++;
	}
    }
}
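
/*
 * A valid _CST return is a package of the form
 *     { Count, Package{ Register(GAS), Type, Latency, Power }, ... }
 * i.e. an integer state count followed by one sub-package per Cx state.
 * The parser below validates the count, pulls the type, latency and power
 * fields out of each sub-package, and allocates the entry register
 * described by the GAS (element 0) for C2/C3 states.
 */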
/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cst_softc *sc)
{
    struct acpi_cx	*cx_ptr;
    ACPI_STATUS		status;
    ACPI_BUFFER		buf;
    ACPI_OBJECT		*top;
    ACPI_OBJECT		*pkg;
    uint32_t		count;
    int			i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cst_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cst_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cst_dev, "invalid _CST state count (%d != %d)\n",
		      count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cst_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    sc->cst_flags |= ACPI_CST_FLAG_PROBING;
    cpu_sfence();

    for (i = 0; i < sc->cst_cx_count; ++i) {
	cx_ptr = &sc->cst_cx_states[i];

	/* Free up any previous register. */
	if (cx_ptr->p_lvlx != NULL) {
	    bus_release_resource(sc->cst_dev, cx_ptr->res_type, cx_ptr->rid,
		cx_ptr->p_lvlx);
	    cx_ptr->p_lvlx = NULL;
	}
    }

    /* Set up all valid states. */
    sc->cst_cx_count = 0;
    cx_ptr = sc->cst_cx_states;
    for (i = 0; i < count; i++) {
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
	    device_printf(sc->cst_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    sc->cst_non_c3 = i;
	    cx_ptr++;
	    sc->cst_cx_count++;
	    continue;
	case ACPI_STATE_C2:
	    sc->cst_non_c3 = i;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "acpi_cpu%d: C3[%d] not available.\n",
				 device_get_unit(sc->cst_dev), i));
		continue;
	    }
	    break;
	}

	/* Allocate the control register for C2 or C3. */
	KASSERT(cx_ptr->p_lvlx == NULL, ("still has lvlx"));
	cx_ptr->rid = sc->cst_parent->cpux_next_rid;
	acpi_PkgGas(sc->cst_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cst_parent->cpux_next_rid++;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			     "acpi_cpu%d: Got C%d - %d latency\n",
			     device_get_unit(sc->cst_dev), cx_ptr->type,
			     cx_ptr->trans_lat));
	    cx_ptr->btag = rman_get_bustag(cx_ptr->p_lvlx);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->p_lvlx);
	    cx_ptr++;
	    sc->cst_cx_count++;
	}
    }
    AcpiOsFree(buf.Pointer);

    /*
     * Fix up the lowest Cx being used.
     */
    if (sc->cst_cx_lowest_req < sc->cst_cx_count)
	sc->cst_cx_lowest = sc->cst_cx_lowest_req;
    if (sc->cst_cx_lowest > sc->cst_cx_count - 1)
	sc->cst_cx_lowest = sc->cst_cx_count - 1;

    /*
     * Cache the lowest non-C3 state.
     * NOTE: This must be done after cst_cx_lowest is set.
     */
    acpi_cpu_cx_non_c3(sc);

    cpu_sfence();
    sc->cst_flags &= ~ACPI_CST_FLAG_PROBING;

    return (0);
}

static void
acpi_cst_probe_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
    int error;

    error = acpi_cpu_cx_cst(rmsg->sc);
    lwkt_replymsg(&rmsg->base.lmsg, error);
}

static int
acpi_cpu_cx_cst_dispatch(struct acpi_cst_softc *sc)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_probe_handler);
    msg.sc = sc;

    return lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0);
}
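
/*
 * acpi_cpu_startup() is queued with AcpiOsExecute() from the first CPU's
 * attach routine, so it runs from an ACPICA task thread once all CPU
 * devices have attached.  It decides between generic (FADT/P_BLK) and
 * _CST Cx mode, applies the chipset quirks, creates the global sysctls
 * and finally installs acpi_cpu_idle() as the cpu_idle_hook.
 */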
/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cst_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cst_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    if (cpu_cx_generic) {
	/*
	 * We are using generic Cx mode, probe for available Cx states
	 * for all processors.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    acpi_cpu_generic_cx_probe(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 state if necessary.
	 *
	 * As we now know for sure that we will be using _CST mode,
	 * install our notify handler.
	 */
	for (i = 0; i < cpu_ndevices; i++) {
	    sc = device_get_softc(cpu_devices[i]);
	    if (cpu_quirks & CPU_QUIRK_NO_C3)
		sc->cst_cx_count = sc->cst_non_c3 + 1;
	    sc->cst_parent->cpux_cst_notify = acpi_cpu_cst_notify;
	}
    }
    acpi_cpu_global_cx_count();

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	acpi_cpu_startup_cx(sc);

	if (sc->cst_parent->glob_sysctl_tree != NULL) {
	    struct acpi_cpux_softc *cpux = sc->cst_parent;

	    /* Add a sysctl handler to handle global Cx lowest setting */
	    SYSCTL_ADD_PROC(&cpux->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpux->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest",
			    CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
			    acpi_cpu_global_cx_lowest_sysctl, "A",
			    "Requested global lowest Cx sleep state");
	    SYSCTL_ADD_PROC(&cpux->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpux->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest_use",
			    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
			    acpi_cpu_global_cx_lowest_use_sysctl, "A",
			    "Global lowest Cx sleep state to use");
	}
    }

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest = 0;
    cpu_cx_lowest_req = 0;
    cpu_disable_idle = FALSE;
    cpu_idle_hook = acpi_cpu_idle;
}

static void
acpi_cpu_cx_list(struct acpi_cst_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sbuf_new(&sb, sc->cst_cx_supported, sizeof(sc->cst_cx_supported),
	SBUF_FIXEDLEN);
    for (i = 0; i < sc->cst_cx_count; i++)
	sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cst_cx_states[i].trans_lat);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
}

static void
acpi_cpu_startup_cx(struct acpi_cst_softc *sc)
{
    struct acpi_cpux_softc *cpux = sc->cst_parent;

    acpi_cpu_cx_list(sc);

    SYSCTL_ADD_STRING(&cpux->pcpu_sysctl_ctx,
		      SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		      OID_AUTO, "cx_supported", CTLFLAG_RD,
		      sc->cst_cx_supported, 0,
		      "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
		    (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
		    "requested lowest Cx sleep state");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_lowest_use", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_cx_lowest_use_sysctl, "A",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&cpux->pcpu_sysctl_ctx,
		    SYSCTL_CHILDREN(cpux->pcpu_sysctl_tree),
		    OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
		    (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
		    "percent usage for each Cx state");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
	ACPI_LOCK(acpi);
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK(acpi);
    }
#endif
}
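
/*
 * cst_prev_sleep is a weighted moving average of recent idle residency,
 * updated as (prev * 3 + measured) / 4 after each sleep.  The idle loop
 * below only picks a Cx state whose transition latency times three is no
 * larger than this average, so short recent sleeps steer us toward the
 * cheaper states and long ones toward the deeper states.
 */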
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(void)
{
    struct acpi_cst_softc *sc;
    struct acpi_cx *cx_next;
    union microtime_pcpu start, end;
    uint64_t dummy;
    int bm_active, cx_next_idx, i, tdiff;

    /* If disabled, return immediately. */
    if (cpu_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* Still probing; use C1 */
    if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
	acpi_cpu_c1();
	return;
    }

    /* Find the lowest state that has a small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cst_cx_lowest; i >= 0; i--) {
	if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * If C3(+) is to be entered, check for bus master activity.
     * If there was activity, clear the bit and use the lowest
     * non-C3 state.
     */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    if (cx_next->type >= ACPI_STATE_C3 &&
	(cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = min(cx_next_idx, sc->cst_non_c3);
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    sc->cst_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half a quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4;
	acpi_cpu_c1();
	return;
    }

    /*
     * For C3(+), disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type >= ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
	} else
	    ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     */
    microtime_pcpu_get(&start);
    cpu_mfence();

    ACPI_CST_ENTER_IO(cx_next);
    /*
     * Perform a dummy I/O read.  Since it may take an arbitrary time
     * to enter the idle state, this read makes sure that we are frozen.
     */
    AcpiRead(&dummy, &AcpiGbl_FADT.XPmTimerBlock);

    cpu_mfence();
    microtime_pcpu_get(&end);

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type >= ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}
    }
    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    tdiff = microtime_pcpu_diff(&start, &end);
    sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + tdiff) / 4;
}
/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cpu_cst_notify(device_t dev)
{
    struct acpi_cst_softc *sc = device_get_softc(dev);

    KASSERT(curthread->td_type != TD_TYPE_NETISR,
	("notify in netisr%d", mycpuid));

    lwkt_serialize_enter(&cpu_cx_slize);

    /* Update the list of Cx states. */
    acpi_cpu_cx_cst_dispatch(sc);
    acpi_cpu_cx_list(sc);

    /* Update the new lowest usable Cx state for all CPUs. */
    acpi_cpu_global_cx_count();

    /*
     * Fix up the lowest Cx being used.
     */
    if (cpu_cx_lowest_req < cpu_cx_count)
	cpu_cx_lowest = cpu_cx_lowest_req;
    if (cpu_cx_lowest > cpu_cx_count - 1)
	cpu_cx_lowest = cpu_cx_count - 1;

    lwkt_serialize_exit(&cpu_cx_slize);
}

static int
acpi_cpu_quirks(void)
{
    device_t acpi_dev;
    uint32_t val;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT.Pm2ControlBlock == 0 ||
	AcpiGbl_FADT.Pm2ControlLength == 0) {
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) {
	    cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, using flush cache method\n"));
	} else {
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: no BM control, C3 not available\n"));
	}
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && ncpus > 1) {
	cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
	    "acpi_cpu: SMP, using flush cache mode for C3\n"));
    }

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 *
	 * Also, make sure that all interrupts cause a "Stop Break"
	 * event to exit from C2 state.
	 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak)
	 * should be set to zero, otherwise it causes C2 to short-sleep.
	 * PIIX4 doesn't properly support C3 and bus master activity
	 * need not break out of C2.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		"acpi_cpu: working around PIIX4 bug, disabling C3\n"));

	    val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4);
	    if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n"));
		val |= PIIX4_STOP_BREAK_MASK;
		pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4);
	    }
	    AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val);
	    if (val) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "acpi_cpu: PIIX4: reset BRLD_EN_BM\n"));
		AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
	    }
	    break;
	default:
	    break;
	}
    }

    return (0);
}
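
/*
 * Report per-state usage as percentages with two decimal places using
 * integer math only: for each state, whole = stats * 100, the integer
 * percent is whole / sum and the two fractional digits come from
 * ((whole % sum) * 100) / sum, where sum is the total of all counters.
 */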
static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    struct sbuf	sb;
    char	buf[128];
    int		i;
    uintmax_t	fract, sum, whole;

    sc = (struct acpi_cst_softc *) arg1;
    sum = 0;
    for (i = 0; i < sc->cst_cx_count; i++)
	sum += sc->cst_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < sc->cst_cx_count; i++) {
	if (sum > 0) {
	    whole = (uintmax_t)sc->cst_cx_stats[i] * 100;
	    fract = (whole % sum) * 100;
	    sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
		(u_int)(fract / sum));
	} else
	    sbuf_printf(&sb, "0.00%% ");
    }
    sbuf_printf(&sb, "last %dus", sc->cst_prev_sleep);
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}
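
/*
 * Apply a new lowest-Cx request on the target CPU itself.  cpu_c3_ncpus
 * counts how many CPUs are currently allowed to enter C3(+): when the
 * first one enters, the interrupt cputimer is switched to one that keeps
 * running in deep sleep (CPUTIMER_INTR_CAP_PS); when the last one leaves,
 * the timer constraint is dropped again (CPUTIMER_INTR_CAP_NONE).  If no
 * suitable timer exists, the request is rolled back.
 */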
static int
acpi_cpu_set_cx_lowest_oncpu(struct acpi_cst_softc *sc, int val)
{
    int old_lowest, error = 0, old_lowest_req;
    uint32_t old_type, type;

    KKASSERT(mycpuid == sc->cst_cpuid);

    old_lowest_req = sc->cst_cx_lowest_req;
    sc->cst_cx_lowest_req = val;

    if (val > sc->cst_cx_count - 1)
	val = sc->cst_cx_count - 1;
    old_lowest = atomic_swap_int(&sc->cst_cx_lowest, val);

    old_type = sc->cst_cx_states[old_lowest].type;
    type = sc->cst_cx_states[val].type;
    if (old_type >= ACPI_STATE_C3 && type < ACPI_STATE_C3) {
	KKASSERT(cpu_c3_ncpus > 0);
	if (atomic_fetchadd_int(&cpu_c3_ncpus, -1) == 1) {
	    /*
	     * All of the CPUs have exited C3 state; a better
	     * one-shot timer can be used.
	     */
	    error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_NONE);
	    KKASSERT(!error || error == ERESTART);
	    if (error == ERESTART) {
		if (bootverbose)
		    kprintf("exit C3, restart intr cputimer\n");
		cputimer_intr_restart();
	    }
	}
    } else if (type >= ACPI_STATE_C3 && old_type < ACPI_STATE_C3) {
	if (atomic_fetchadd_int(&cpu_c3_ncpus, 1) == 0) {
	    /*
	     * When the first CPU enters C3(+) state, switch
	     * to a one-shot timer which can handle C3(+)
	     * state, i.e. the timer will not hang.
	     */
	    error = cputimer_intr_select_caps(CPUTIMER_INTR_CAP_PS);
	    if (error == ERESTART) {
		if (bootverbose)
		    kprintf("enter C3, restart intr cputimer\n");
		cputimer_intr_restart();
	    } else if (error) {
		kprintf("no suitable intr cputimer found\n");

		/* Restore */
		sc->cst_cx_lowest_req = old_lowest_req;
		sc->cst_cx_lowest = old_lowest;
		atomic_fetchadd_int(&cpu_c3_ncpus, -1);
	    }
	}
    }

    if (error)
	return error;

    /* Cache the new lowest non-C3 state. */
    acpi_cpu_cx_non_c3(sc);

    /* Reset the statistics counters. */
    bzero(sc->cst_cx_stats, sizeof(sc->cst_cx_stats));
    return (0);
}

static void
acpi_cst_set_lowest_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
    int error;

    error = acpi_cpu_set_cx_lowest_oncpu(rmsg->sc, rmsg->val);
    lwkt_replymsg(&rmsg->base.lmsg, error);
}

static int
acpi_cpu_set_cx_lowest(struct acpi_cst_softc *sc, int val)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_set_lowest_handler);
    msg.sc = sc;
    msg.val = val;

    return lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    char	state[8];
    int		val, error;

    sc = (struct acpi_cst_softc *)arg1;
    ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest_req + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0)
	return (EINVAL);

    lwkt_serialize_enter(&cpu_cx_slize);
    error = acpi_cpu_set_cx_lowest(sc, val);
    lwkt_serialize_exit(&cpu_cx_slize);

    return error;
}

static int
acpi_cpu_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    char state[8];

    sc = (struct acpi_cst_softc *)arg1;
    ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest + 1);
    return sysctl_handle_string(oidp, state, sizeof(state), req);
}
static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cst_softc *sc;
    char	state[8];
    int		val, error, i;

    ksnprintf(state, sizeof(state), "C%d", cpu_cx_lowest_req + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
	return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0)
	return (EINVAL);

    lwkt_serialize_enter(&cpu_cx_slize);

    cpu_cx_lowest_req = val;
    cpu_cx_lowest = val;
    if (cpu_cx_lowest > cpu_cx_count - 1)
	cpu_cx_lowest = cpu_cx_count - 1;

    /* Update the new lowest usable Cx state for all CPUs. */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	error = acpi_cpu_set_cx_lowest(sc, val);
	if (error) {
	    KKASSERT(i == 0);
	    break;
	}
    }

    lwkt_serialize_exit(&cpu_cx_slize);

    return error;
}

static int
acpi_cpu_global_cx_lowest_use_sysctl(SYSCTL_HANDLER_ARGS)
{
    char state[8];

    ksnprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1);
    return sysctl_handle_string(oidp, state, sizeof(state), req);
}

/*
 * Put the CPU in C1 in a machine-dependent way.
 * XXX: shouldn't be here!
 */
static void
acpi_cpu_c1(void)
{
#ifdef __ia64__
    ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
#else
    splz();
    if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
	__asm __volatile("sti; hlt");
    else
	__asm __volatile("sti; pause");
#endif /* !__ia64__ */
}

static void
acpi_cpu_cx_non_c3(struct acpi_cst_softc *sc)
{
    int i;

    sc->cst_non_c3 = 0;
    for (i = sc->cst_cx_lowest; i >= 0; i--) {
	if (sc->cst_cx_states[i].type < ACPI_STATE_C3) {
	    sc->cst_non_c3 = i;
	    break;
	}
    }
    if (bootverbose)
	device_printf(sc->cst_dev, "non-C3 %d\n", sc->cst_non_c3);
}

/*
 * Update the global cpu_cx_count to the deepest Cx state supported by
 * every CPU, i.e. the minimum of the per-CPU Cx counts.  It is used by
 * the global Cx sysctl handler.
 */
static void
acpi_cpu_global_cx_count(void)
{
    struct acpi_cst_softc *sc;
    int i;

    if (cpu_ndevices == 0) {
	cpu_cx_count = 0;
	return;
    }

    sc = device_get_softc(cpu_devices[0]);
    cpu_cx_count = sc->cst_cx_count;

    for (i = 1; i < cpu_ndevices; i++) {
	struct acpi_cst_softc *sc = device_get_softc(cpu_devices[i]);

	if (sc->cst_cx_count < cpu_cx_count)
	    cpu_cx_count = sc->cst_cx_count;
    }
    if (bootverbose)
	kprintf("cpu_cst: global Cx count %d\n", cpu_cx_count);
}