1 /* $OpenBSD: acpicpu.c,v 1.95 2024/10/22 21:50:02 jsg Exp $ */ 2 /* 3 * Copyright (c) 2005 Marco Peereboom <marco@openbsd.org> 4 * Copyright (c) 2015 Philip Guenther <guenther@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <sys/param.h> 20 #include <sys/kernel.h> /* for tick */ 21 #include <sys/signalvar.h> 22 #include <sys/sysctl.h> 23 #include <sys/systm.h> 24 #include <sys/device.h> 25 #include <sys/malloc.h> 26 #include <sys/queue.h> 27 #include <sys/atomic.h> 28 29 #include <machine/bus.h> 30 #include <machine/cpu.h> 31 #include <machine/cpufunc.h> 32 #include <machine/specialreg.h> 33 34 #include <dev/acpi/acpireg.h> 35 #include <dev/acpi/acpivar.h> 36 #include <dev/acpi/acpidev.h> 37 #include <dev/acpi/amltypes.h> 38 #include <dev/acpi/dsdt.h> 39 40 #include <sys/sensors.h> 41 42 int acpicpu_match(struct device *, void *, void *); 43 void acpicpu_attach(struct device *, struct device *, void *); 44 int acpicpu_notify(struct aml_node *, int, void *); 45 void acpicpu_setperf(int); 46 void acpicpu_setperf_ppc_change(struct acpicpu_pss *, int); 47 48 #define ACPI_STATE_C0 0x00 49 #define ACPI_STATE_C1 0x01 50 #define ACPI_STATE_C2 0x02 51 #define ACPI_STATE_C3 0x03 52 53 #define ACPI_PDC_REVID 0x1 54 #define ACPI_PDC_SMP 0xa 55 #define ACPI_PDC_MSR 0x1 56 57 /* _PDC/_OSC Intel capabilities flags */ 58 #define ACPI_PDC_P_FFH 0x0001 59 #define ACPI_PDC_C_C1_HALT 0x0002 60 #define ACPI_PDC_T_FFH 0x0004 61 #define ACPI_PDC_SMP_C1PT 0x0008 62 #define ACPI_PDC_SMP_C2C3 0x0010 63 #define ACPI_PDC_SMP_P_SWCOORD 0x0020 64 #define ACPI_PDC_SMP_C_SWCOORD 0x0040 65 #define ACPI_PDC_SMP_T_SWCOORD 0x0080 66 #define ACPI_PDC_C_C1_FFH 0x0100 67 #define ACPI_PDC_C_C2C3_FFH 0x0200 68 /* reserved 0x0400 */ 69 #define ACPI_PDC_P_HWCOORD 0x0800 70 #define ACPI_PDC_PPC_NOTIFY 0x1000 71 72 #define CST_METH_HALT 0 73 #define CST_METH_IO_HALT 1 74 #define CST_METH_MWAIT 2 75 #define CST_METH_GAS_IO 3 76 77 /* flags on Intel's FFH mwait method */ 78 #define CST_FLAG_MWAIT_HW_COORD 0x1 79 #define CST_FLAG_MWAIT_BM_AVOIDANCE 0x2 80 #define CST_FLAG_FALLBACK 0x4000 /* fallback for broken _CST */ 81 #define CST_FLAG_SKIP 0x8000 /* state is worse choice */ 82 83 #define FLAGS_MWAIT_ONLY 0x02 84 #define FLAGS_BMCHECK 0x04 85 #define FLAGS_NOTHROTTLE 0x08 86 #define FLAGS_NOPSS 0x10 87 #define FLAGS_NOPCT 0x20 88 89 #define CPU_THT_EN (1L << 4) 90 #define CPU_MAXSTATE(sc) (1L << (sc)->sc_duty_wid) 91 #define CPU_STATE(sc,pct) ((pct * CPU_MAXSTATE(sc) / 100) << (sc)->sc_duty_off) 92 #define CPU_STATEMASK(sc) ((CPU_MAXSTATE(sc) - 1) << (sc)->sc_duty_off) 93 94 #define ACPI_MAX_C2_LATENCY 100 95 #define ACPI_MAX_C3_LATENCY 1000 96 97 #define CSD_COORD_SW_ALL 0xFC 98 #define CSD_COORD_SW_ANY 0xFD 99 #define CSD_COORD_HW_ALL 0xFE 100 101 /* Make sure throttling bits are valid,a=addr,o=offset,w=width */ 102 #define valid_throttle(o,w,a) (a && w && (o+w)<=31 && (o>4 || (o+w)<=4)) 103 104 struct acpi_cstate { 105 SLIST_ENTRY(acpi_cstate) link; 106 107 u_short state; 108 short method; /* CST_METH_* */ 109 u_short flags; /* CST_FLAG_* */ 110 u_short latency; 111 int power; 112 uint64_t address; /* or mwait hint */ 113 }; 114 115 unsigned long cst_stats[4] = { 0 }; 116 117 struct acpicpu_softc { 118 struct device sc_dev; 119 int sc_cpu; 120 121 int sc_duty_wid; 122 int sc_duty_off; 123 uint32_t sc_pblk_addr; 124 int sc_pblk_len; 125 int sc_flags; 126 unsigned long sc_prev_sleep; 127 unsigned long sc_last_itime; 128 129 struct cpu_info *sc_ci; 130 SLIST_HEAD(,acpi_cstate) sc_cstates; 131 132 bus_space_tag_t sc_iot; 133 bus_space_handle_t sc_ioh; 134 135 struct acpi_softc *sc_acpi; 136 struct aml_node *sc_devnode; 137 138 int sc_pss_len; /* XXX */ 139 int sc_ppc; 140 int sc_level; 141 struct acpicpu_pss *sc_pss; 142 size_t sc_pssfulllen; 143 144 struct acpicpu_pct sc_pct; 145 /* save compensation for pct access for lying bios' */ 146 uint32_t sc_pct_stat_as; 147 uint32_t sc_pct_ctrl_as; 148 uint32_t sc_pct_stat_len; 149 uint32_t sc_pct_ctrl_len; 150 /* 151 * XXX: _PPC Change listener 152 * PPC changes can occur when for example a machine is disconnected 153 * from AC power and can no longer support the highest frequency or 154 * voltage when driven from the battery. 155 * Should probably be reimplemented as a list for now we assume only 156 * one listener 157 */ 158 void (*sc_notify)(struct acpicpu_pss *, int); 159 }; 160 161 void acpicpu_add_cstatepkg(struct aml_value *, void *); 162 void acpicpu_add_cdeppkg(struct aml_value *, void *); 163 int acpicpu_getppc(struct acpicpu_softc *); 164 int acpicpu_getpct(struct acpicpu_softc *); 165 int acpicpu_getpss(struct acpicpu_softc *); 166 int acpicpu_getcst(struct acpicpu_softc *); 167 void acpicpu_getcst_from_fadt(struct acpicpu_softc *); 168 void acpicpu_print_one_cst(struct acpi_cstate *_cx); 169 void acpicpu_print_cst(struct acpicpu_softc *_sc); 170 void acpicpu_add_cstate(struct acpicpu_softc *_sc, int _state, int _method, 171 int _flags, int _latency, int _power, uint64_t _address); 172 void acpicpu_set_pdc(struct acpicpu_softc *); 173 void acpicpu_idle(void); 174 void acpicpu_suspend(void); 175 176 #if 0 177 void acpicpu_set_throttle(struct acpicpu_softc *, int); 178 struct acpi_cstate *acpicpu_find_cstate(struct acpicpu_softc *, int); 179 #endif 180 181 const struct cfattach acpicpu_ca = { 182 sizeof(struct acpicpu_softc), acpicpu_match, acpicpu_attach 183 }; 184 185 struct cfdriver acpicpu_cd = { 186 NULL, "acpicpu", DV_DULL 187 }; 188 189 const char *acpicpu_hids[] = { 190 "ACPI0007", 191 NULL 192 }; 193 194 extern int setperf_prio; 195 196 #if 0 197 void 198 acpicpu_set_throttle(struct acpicpu_softc *sc, int level) 199 { 200 uint32_t pbval; 201 202 if (sc->sc_flags & FLAGS_NOTHROTTLE) 203 return; 204 205 /* Disable throttling control */ 206 pbval = inl(sc->sc_pblk_addr); 207 outl(sc->sc_pblk_addr, pbval & ~CPU_THT_EN); 208 if (level < 100) { 209 pbval &= ~CPU_STATEMASK(sc); 210 pbval |= CPU_STATE(sc, level); 211 outl(sc->sc_pblk_addr, pbval & ~CPU_THT_EN); 212 outl(sc->sc_pblk_addr, pbval | CPU_THT_EN); 213 } 214 } 215 216 struct acpi_cstate * 217 acpicpu_find_cstate(struct acpicpu_softc *sc, int state) 218 { 219 struct acpi_cstate *cx; 220 221 SLIST_FOREACH(cx, &sc->sc_cstates, link) 222 if (cx->state == state) 223 return cx; 224 return (NULL); 225 } 226 #endif 227 228 229 void 230 acpicpu_set_pdc(struct acpicpu_softc *sc) 231 { 232 struct aml_value cmd, osc_cmd[4]; 233 struct aml_value res; 234 uint32_t cap; 235 uint32_t buf[3]; 236 237 /* 4077A616-290C-47BE-9EBD-D87058713953 */ 238 static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 239 0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70, 240 0x58, 0x71, 0x39, 0x53 }; 241 cap = ACPI_PDC_C_C1_HALT | ACPI_PDC_P_FFH | ACPI_PDC_C_C1_FFH 242 | ACPI_PDC_C_C2C3_FFH | ACPI_PDC_SMP_P_SWCOORD | ACPI_PDC_SMP_C2C3 243 | ACPI_PDC_SMP_C1PT; 244 245 if (aml_searchname(sc->sc_devnode, "_OSC")) { 246 /* Query _OSC */ 247 memset(&osc_cmd, 0, sizeof(osc_cmd)); 248 osc_cmd[0].type = AML_OBJTYPE_BUFFER; 249 osc_cmd[0].v_buffer = (uint8_t *)&cpu_oscuuid; 250 osc_cmd[0].length = sizeof(cpu_oscuuid); 251 252 osc_cmd[1].type = AML_OBJTYPE_INTEGER; 253 osc_cmd[1].v_integer = 1; 254 osc_cmd[1].length = 1; 255 256 osc_cmd[2].type = AML_OBJTYPE_INTEGER; 257 osc_cmd[2].v_integer = 2; 258 osc_cmd[2].length = 1; 259 260 buf[0] = 1; 261 buf[1] = cap; 262 osc_cmd[3].type = AML_OBJTYPE_BUFFER; 263 osc_cmd[3].v_buffer = (int8_t *)&buf; 264 osc_cmd[3].length = sizeof(buf); 265 266 aml_evalname(sc->sc_acpi, sc->sc_devnode, "_OSC", 267 4, osc_cmd, &res); 268 269 if (res.type != AML_OBJTYPE_BUFFER || res.length < 8) { 270 printf(": unable to query capabilities\n"); 271 aml_freevalue(&res); 272 return; 273 } 274 275 /* Evaluate _OSC */ 276 memset(&osc_cmd, 0, sizeof(osc_cmd)); 277 osc_cmd[0].type = AML_OBJTYPE_BUFFER; 278 osc_cmd[0].v_buffer = (uint8_t *)&cpu_oscuuid; 279 osc_cmd[0].length = sizeof(cpu_oscuuid); 280 281 osc_cmd[1].type = AML_OBJTYPE_INTEGER; 282 osc_cmd[1].v_integer = 1; 283 osc_cmd[1].length = 1; 284 285 osc_cmd[2].type = AML_OBJTYPE_INTEGER; 286 osc_cmd[2].v_integer = 2; 287 osc_cmd[2].length = 1; 288 289 buf[0] = 0; 290 buf[1] = (*(uint32_t *)&res.v_buffer[4]) & cap; 291 osc_cmd[3].type = AML_OBJTYPE_BUFFER; 292 osc_cmd[3].v_buffer = (int8_t *)&buf; 293 osc_cmd[3].length = sizeof(buf); 294 295 aml_freevalue(&res); 296 297 aml_evalname(sc->sc_acpi, sc->sc_devnode, "_OSC", 298 4, osc_cmd, NULL); 299 } else { 300 /* Evaluate _PDC */ 301 memset(&cmd, 0, sizeof(cmd)); 302 cmd.type = AML_OBJTYPE_BUFFER; 303 cmd.v_buffer = (uint8_t *)&buf; 304 cmd.length = sizeof(buf); 305 306 buf[0] = ACPI_PDC_REVID; 307 buf[1] = 1; 308 buf[2] = cap; 309 310 aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PDC", 311 1, &cmd, NULL); 312 } 313 } 314 315 /* 316 * sanity check mwait hints against what cpuid told us 317 * ...but because intel screwed up, just check whether cpuid says 318 * the given state has _any_ substates. 319 */ 320 static int 321 check_mwait_hints(int state, int hints) 322 { 323 int cstate; 324 int num_substates; 325 326 if (cpu_mwait_size == 0) 327 return (0); 328 cstate = ((hints >> 4) & 0xf) + 1; 329 if (cstate == 16) 330 cstate = 0; 331 else if (cstate > 7) { 332 /* out of range of test against CPUID; just trust'em */ 333 return (1); 334 } 335 num_substates = (cpu_mwait_states >> (4 * cstate)) & 0xf; 336 if (num_substates == 0) { 337 printf(": C%d bad (state %d has no substates)", state, cstate); 338 return (0); 339 } 340 return (1); 341 } 342 343 void 344 acpicpu_add_cstate(struct acpicpu_softc *sc, int state, int method, 345 int flags, int latency, int power, uint64_t address) 346 { 347 struct acpi_cstate *cx; 348 349 dnprintf(10," C%d: latency:.%4x power:%.4x addr:%.16llx\n", 350 state, latency, power, address); 351 352 /* add a new state, or overwrite the fallback C1 state? */ 353 if (state != ACPI_STATE_C1 || 354 (cx = SLIST_FIRST(&sc->sc_cstates)) == NULL || 355 (cx->flags & CST_FLAG_FALLBACK) == 0) { 356 cx = malloc(sizeof(*cx), M_DEVBUF, M_WAITOK); 357 SLIST_INSERT_HEAD(&sc->sc_cstates, cx, link); 358 } 359 360 cx->state = state; 361 cx->method = method; 362 cx->flags = flags; 363 cx->latency = latency; 364 cx->power = power; 365 cx->address = address; 366 } 367 368 /* Found a _CST object, add new cstate for each entry */ 369 void 370 acpicpu_add_cstatepkg(struct aml_value *val, void *arg) 371 { 372 struct acpicpu_softc *sc = arg; 373 uint64_t addr; 374 struct acpi_grd *grd; 375 int state, method, flags; 376 377 #if defined(ACPI_DEBUG) && !defined(SMALL_KERNEL) 378 aml_showvalue(val); 379 #endif 380 if (val->type != AML_OBJTYPE_PACKAGE || val->length != 4) 381 return; 382 383 /* range and sanity checks */ 384 state = val->v_package[1]->v_integer; 385 if (state < 0 || state > 4) 386 return; 387 if (val->v_package[0]->type != AML_OBJTYPE_BUFFER) { 388 printf(": C%d (unexpected ACPI object type %d)", 389 state, val->v_package[0]->type); 390 return; 391 } 392 grd = (struct acpi_grd *)val->v_package[0]->v_buffer; 393 if (val->v_package[0]->length != sizeof(*grd) + 2 || 394 grd->grd_descriptor != LR_GENREGISTER || 395 grd->grd_length != sizeof(grd->grd_gas) || 396 val->v_package[0]->v_buffer[sizeof(*grd)] != SRT_ENDTAG) { 397 printf(": C%d (bogo buffer)", state); 398 return; 399 } 400 401 flags = 0; 402 switch (grd->grd_gas.address_space_id) { 403 case GAS_FUNCTIONAL_FIXED: 404 if (grd->grd_gas.register_bit_width == 0) { 405 method = CST_METH_HALT; 406 addr = 0; 407 } else { 408 /* 409 * In theory we should only do this for 410 * vendor 1 == Intel but other values crop up, 411 * presumably due to the normal ACPI spec confusion. 412 */ 413 switch (grd->grd_gas.register_bit_offset) { 414 case 0x1: 415 method = CST_METH_IO_HALT; 416 addr = grd->grd_gas.address; 417 418 /* i386 and amd64 I/O space is 16bits */ 419 if (addr > 0xffff) { 420 printf(": C%d (bogo I/O addr %llx)", 421 state, addr); 422 return; 423 } 424 break; 425 case 0x2: 426 addr = grd->grd_gas.address; 427 if (!check_mwait_hints(state, addr)) 428 return; 429 method = CST_METH_MWAIT; 430 flags = grd->grd_gas.access_size; 431 break; 432 default: 433 printf(": C%d (unknown FFH class %d)", 434 state, grd->grd_gas.register_bit_offset); 435 return; 436 } 437 } 438 break; 439 440 case GAS_SYSTEM_IOSPACE: 441 addr = grd->grd_gas.address; 442 if (grd->grd_gas.register_bit_width != 8 || 443 grd->grd_gas.register_bit_offset != 0) { 444 printf(": C%d (unhandled %s spec: %d/%d)", state, 445 "I/O", grd->grd_gas.register_bit_width, 446 grd->grd_gas.register_bit_offset); 447 return; 448 } 449 method = CST_METH_GAS_IO; 450 break; 451 452 default: 453 /* dump the GAS for analysis */ 454 { 455 int i; 456 printf(": C%d (unhandled GAS:", state); 457 for (i = 0; i < sizeof(grd->grd_gas); i++) 458 printf(" %#x", ((u_char *)&grd->grd_gas)[i]); 459 printf(")"); 460 461 } 462 return; 463 } 464 465 acpicpu_add_cstate(sc, state, method, flags, 466 val->v_package[2]->v_integer, val->v_package[3]->v_integer, addr); 467 } 468 469 470 /* Found a _CSD object, print the dependency */ 471 void 472 acpicpu_add_cdeppkg(struct aml_value *val, void *arg) 473 { 474 int64_t num_proc, coord_type, domain, cindex; 475 476 /* 477 * errors: unexpected object type, bad length, mismatched length, 478 * and bad CSD revision 479 */ 480 if (val->type != AML_OBJTYPE_PACKAGE || val->length < 6 || 481 val->length != val->v_package[0]->v_integer || 482 val->v_package[1]->v_integer != 0) { 483 #if 1 || defined(ACPI_DEBUG) && !defined(SMALL_KERNEL) 484 aml_showvalue(val); 485 #endif 486 printf("bogus CSD\n"); 487 return; 488 } 489 490 /* coordinating 'among' one CPU is trivial, ignore */ 491 num_proc = val->v_package[4]->v_integer; 492 if (num_proc == 1) 493 return; 494 495 /* we practically assume the hardware will coordinate, so ignore */ 496 coord_type = val->v_package[3]->v_integer; 497 if (coord_type == CSD_COORD_HW_ALL) 498 return; 499 500 domain = val->v_package[2]->v_integer; 501 cindex = val->v_package[5]->v_integer; 502 printf(": CSD (c=%#llx d=%lld n=%lld i=%lli)", 503 coord_type, domain, num_proc, cindex); 504 } 505 506 int 507 acpicpu_getcst(struct acpicpu_softc *sc) 508 { 509 struct aml_value res; 510 struct acpi_cstate *cx, *next_cx; 511 int use_nonmwait; 512 513 /* delete the existing list */ 514 while ((cx = SLIST_FIRST(&sc->sc_cstates)) != NULL) { 515 SLIST_REMOVE_HEAD(&sc->sc_cstates, link); 516 free(cx, M_DEVBUF, sizeof(*cx)); 517 } 518 519 /* provide a fallback C1-via-halt in case _CST's C1 is bogus */ 520 acpicpu_add_cstate(sc, ACPI_STATE_C1, CST_METH_HALT, 521 CST_FLAG_FALLBACK, 1, -1, 0); 522 523 if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_CST", 0, NULL, &res)) 524 return (1); 525 526 aml_foreachpkg(&res, 1, acpicpu_add_cstatepkg, sc); 527 aml_freevalue(&res); 528 529 /* only have fallback state? then no _CST objects were understood */ 530 cx = SLIST_FIRST(&sc->sc_cstates); 531 if (cx->flags & CST_FLAG_FALLBACK) 532 return (1); 533 534 /* 535 * Skip states >= C2 if the CPU's LAPIC timer stops in deep 536 * states (i.e., it doesn't have the 'ARAT' bit set). 537 * Also keep track if all the states we'll use use mwait. 538 */ 539 use_nonmwait = 0; 540 while ((next_cx = SLIST_NEXT(cx, link)) != NULL) { 541 if (cx->state > 1 && 542 (sc->sc_ci->ci_feature_tpmflags & TPM_ARAT) == 0) 543 cx->flags |= CST_FLAG_SKIP; 544 else if (cx->method != CST_METH_MWAIT) 545 use_nonmwait = 1; 546 cx = next_cx; 547 } 548 if (use_nonmwait) 549 sc->sc_flags &= ~FLAGS_MWAIT_ONLY; 550 else 551 sc->sc_flags |= FLAGS_MWAIT_ONLY; 552 553 if (!aml_evalname(sc->sc_acpi, sc->sc_devnode, "_CSD", 0, NULL, &res)) { 554 aml_foreachpkg(&res, 1, acpicpu_add_cdeppkg, sc); 555 aml_freevalue(&res); 556 } 557 558 return (0); 559 } 560 561 /* 562 * old-style fixed C-state info in the FADT. 563 * Note that this has extra restrictions on values and flags. 564 */ 565 void 566 acpicpu_getcst_from_fadt(struct acpicpu_softc *sc) 567 { 568 struct acpi_fadt *fadt = sc->sc_acpi->sc_fadt; 569 int flags; 570 571 /* FADT has to set flag to do C2 and higher on MP */ 572 if ((fadt->flags & FADT_P_LVL2_UP) == 0 && ncpus > 1) 573 return; 574 575 /* skip these C2 and C3 states if the CPU doesn't have ARAT */ 576 flags = (sc->sc_ci->ci_feature_tpmflags & TPM_ARAT) 577 ? 0 : CST_FLAG_SKIP; 578 579 /* Some systems don't export a full PBLK; reduce functionality */ 580 if (sc->sc_pblk_len >= 5 && fadt->p_lvl2_lat <= ACPI_MAX_C2_LATENCY) { 581 acpicpu_add_cstate(sc, ACPI_STATE_C2, CST_METH_GAS_IO, flags, 582 fadt->p_lvl2_lat, -1, sc->sc_pblk_addr + 4); 583 } 584 if (sc->sc_pblk_len >= 6 && fadt->p_lvl3_lat <= ACPI_MAX_C3_LATENCY) 585 acpicpu_add_cstate(sc, ACPI_STATE_C3, CST_METH_GAS_IO, flags, 586 fadt->p_lvl3_lat, -1, sc->sc_pblk_addr + 5); 587 } 588 589 590 void 591 acpicpu_print_one_cst(struct acpi_cstate *cx) 592 { 593 const char *meth = ""; 594 int show_addr = 0; 595 596 switch (cx->method) { 597 case CST_METH_IO_HALT: 598 show_addr = 1; 599 /* fallthrough */ 600 case CST_METH_HALT: 601 meth = " halt"; 602 break; 603 604 case CST_METH_MWAIT: 605 meth = " mwait"; 606 show_addr = cx->address != 0; 607 break; 608 609 case CST_METH_GAS_IO: 610 meth = " io"; 611 show_addr = 1; 612 break; 613 614 } 615 616 printf(" %sC%d(", (cx->flags & CST_FLAG_SKIP ? "!" : ""), cx->state); 617 if (cx->power != -1) 618 printf("%d", cx->power); 619 printf("@%d%s", cx->latency, meth); 620 if (cx->flags & ~CST_FLAG_SKIP) { 621 if (cx->flags & CST_FLAG_FALLBACK) 622 printf("!"); 623 else 624 printf(".%x", (cx->flags & ~CST_FLAG_SKIP)); 625 } 626 if (show_addr) 627 printf("@0x%llx", cx->address); 628 printf(")"); 629 } 630 631 void 632 acpicpu_print_cst(struct acpicpu_softc *sc) 633 { 634 struct acpi_cstate *cx; 635 int i; 636 637 if (!SLIST_EMPTY(&sc->sc_cstates)) { 638 printf(":"); 639 640 i = 0; 641 SLIST_FOREACH(cx, &sc->sc_cstates, link) { 642 if (i++) 643 printf(","); 644 acpicpu_print_one_cst(cx); 645 } 646 } 647 } 648 649 650 int 651 acpicpu_match(struct device *parent, void *match, void *aux) 652 { 653 struct acpi_attach_args *aa = aux; 654 struct cfdata *cf = match; 655 struct acpi_softc *acpi = (struct acpi_softc *)parent; 656 CPU_INFO_ITERATOR cii; 657 struct cpu_info *ci; 658 int64_t uid; 659 660 if (acpi_matchhids(aa, acpicpu_hids, cf->cf_driver->cd_name) && 661 aa->aaa_node && aa->aaa_node->value && 662 aa->aaa_node->value->type == AML_OBJTYPE_DEVICE) { 663 /* 664 * Record that we've seen a Device() CPU object, 665 * so we won't attach any Processor() nodes. 666 */ 667 acpi->sc_skip_processor = 1; 668 669 /* Only match if we can find a CPU with the right ID */ 670 if (aml_evalinteger(acpi, aa->aaa_node, "_UID", 0, 671 NULL, &uid) == 0) 672 CPU_INFO_FOREACH(cii, ci) 673 if (ci->ci_acpi_proc_id == uid) 674 return (1); 675 676 return (0); 677 } 678 679 /* sanity */ 680 if (aa->aaa_name == NULL || 681 strcmp(aa->aaa_name, cf->cf_driver->cd_name) != 0 || 682 aa->aaa_table != NULL) 683 return (0); 684 685 return (1); 686 } 687 688 void 689 acpicpu_attach(struct device *parent, struct device *self, void *aux) 690 { 691 struct acpicpu_softc *sc = (struct acpicpu_softc *)self; 692 struct acpi_attach_args *aa = aux; 693 struct aml_value res; 694 int64_t uid; 695 int i; 696 uint32_t status = 0; 697 CPU_INFO_ITERATOR cii; 698 struct cpu_info *ci; 699 700 sc->sc_acpi = (struct acpi_softc *)parent; 701 sc->sc_devnode = aa->aaa_node; 702 703 SLIST_INIT(&sc->sc_cstates); 704 705 if (aml_evalinteger(sc->sc_acpi, sc->sc_devnode, 706 "_UID", 0, NULL, &uid) == 0) 707 sc->sc_cpu = uid; 708 709 if (aml_evalnode(sc->sc_acpi, sc->sc_devnode, 0, NULL, &res) == 0) { 710 if (res.type == AML_OBJTYPE_PROCESSOR) { 711 sc->sc_cpu = res.v_processor.proc_id; 712 sc->sc_pblk_addr = res.v_processor.proc_addr; 713 sc->sc_pblk_len = res.v_processor.proc_len; 714 } 715 aml_freevalue(&res); 716 } 717 sc->sc_duty_off = sc->sc_acpi->sc_fadt->duty_offset; 718 sc->sc_duty_wid = sc->sc_acpi->sc_fadt->duty_width; 719 720 /* link in the matching cpu_info */ 721 CPU_INFO_FOREACH(cii, ci) 722 if (ci->ci_acpi_proc_id == sc->sc_cpu) { 723 ci->ci_acpicpudev = self; 724 sc->sc_ci = ci; 725 break; 726 } 727 if (ci == NULL) { 728 printf(": no cpu matching ACPI ID %d\n", sc->sc_cpu); 729 return; 730 } 731 732 sc->sc_prev_sleep = 1000000; 733 734 acpicpu_set_pdc(sc); 735 736 if (!valid_throttle(sc->sc_duty_off, sc->sc_duty_wid, sc->sc_pblk_addr)) 737 sc->sc_flags |= FLAGS_NOTHROTTLE; 738 #ifdef ACPI_DEBUG 739 printf(": %s: ", sc->sc_devnode->name); 740 printf("\n: hdr:%x pblk:%x,%x duty:%x,%x pstate:%x " 741 "(%ld throttling states)\n", sc->sc_acpi->sc_fadt->hdr_revision, 742 sc->sc_pblk_addr, sc->sc_pblk_len, sc->sc_duty_off, 743 sc->sc_duty_wid, sc->sc_acpi->sc_fadt->pstate_cnt, 744 CPU_MAXSTATE(sc)); 745 #endif 746 747 /* Get C-States from _CST or FADT */ 748 if (acpicpu_getcst(sc) || SLIST_EMPTY(&sc->sc_cstates)) 749 acpicpu_getcst_from_fadt(sc); 750 else { 751 /* Notify BIOS we use _CST objects */ 752 if (sc->sc_acpi->sc_fadt->cst_cnt) { 753 acpi_write_pmreg(sc->sc_acpi, ACPIREG_SMICMD, 0, 754 sc->sc_acpi->sc_fadt->cst_cnt); 755 } 756 } 757 if (!SLIST_EMPTY(&sc->sc_cstates)) { 758 extern uint32_t acpi_force_bm; 759 760 cpu_idle_cycle_fcn = &acpicpu_idle; 761 cpu_suspend_cycle_fcn = &acpicpu_suspend; 762 763 /* 764 * C3 (and maybe C2?) needs BM_RLD to be set to 765 * wake the system 766 */ 767 if (SLIST_FIRST(&sc->sc_cstates)->state > 1 && acpi_force_bm == 0) { 768 uint16_t en = acpi_read_pmreg(sc->sc_acpi, 769 ACPIREG_PM1_CNT, 0); 770 if ((en & ACPI_PM1_BM_RLD) == 0) { 771 acpi_write_pmreg(sc->sc_acpi, ACPIREG_PM1_CNT, 772 0, en | ACPI_PM1_BM_RLD); 773 acpi_force_bm = ACPI_PM1_BM_RLD; 774 } 775 } 776 } 777 778 if (acpicpu_getpss(sc)) { 779 sc->sc_flags |= FLAGS_NOPSS; 780 } else { 781 #ifdef ACPI_DEBUG 782 for (i = 0; i < sc->sc_pss_len; i++) { 783 dnprintf(20, "%d %d %d %d %d %d\n", 784 sc->sc_pss[i].pss_core_freq, 785 sc->sc_pss[i].pss_power, 786 sc->sc_pss[i].pss_trans_latency, 787 sc->sc_pss[i].pss_bus_latency, 788 sc->sc_pss[i].pss_ctrl, 789 sc->sc_pss[i].pss_status); 790 } 791 dnprintf(20, "\n"); 792 #endif 793 if (sc->sc_pss_len == 0) { 794 /* this should never happen */ 795 printf("%s: invalid _PSS length\n", DEVNAME(sc)); 796 sc->sc_flags |= FLAGS_NOPSS; 797 } 798 799 acpicpu_getppc(sc); 800 if (acpicpu_getpct(sc)) 801 sc->sc_flags |= FLAGS_NOPCT; 802 else if (sc->sc_pss_len > 0) { 803 /* Notify BIOS we are handling p-states */ 804 if (sc->sc_acpi->sc_fadt->pstate_cnt) { 805 acpi_write_pmreg(sc->sc_acpi, ACPIREG_SMICMD, 806 0, sc->sc_acpi->sc_fadt->pstate_cnt); 807 } 808 809 aml_register_notify(sc->sc_devnode, NULL, 810 acpicpu_notify, sc, ACPIDEV_NOPOLL); 811 812 acpi_gasio(sc->sc_acpi, ACPI_IOREAD, 813 sc->sc_pct.pct_status.grd_gas.address_space_id, 814 sc->sc_pct.pct_status.grd_gas.address, 815 sc->sc_pct_stat_as, sc->sc_pct_stat_as, &status); 816 sc->sc_level = (100 / sc->sc_pss_len) * 817 (sc->sc_pss_len - status); 818 dnprintf(20, "%s: cpu index %d, percentage %d\n", 819 DEVNAME(sc), status, sc->sc_level); 820 if (setperf_prio < 30) { 821 cpu_setperf = acpicpu_setperf; 822 acpicpu_set_notify(acpicpu_setperf_ppc_change); 823 setperf_prio = 30; 824 acpi_hasprocfvs = 1; 825 } 826 } 827 } 828 829 /* 830 * Nicely enumerate what power management capabilities 831 * ACPI CPU provides. 832 */ 833 acpicpu_print_cst(sc); 834 if (!(sc->sc_flags & (FLAGS_NOPSS | FLAGS_NOPCT)) || 835 !(sc->sc_flags & FLAGS_NOPSS)) { 836 printf("%c ", SLIST_EMPTY(&sc->sc_cstates) ? ':' : ','); 837 838 /* 839 * If acpicpu is itself providing the capability to transition 840 * states, enumerate them in the fashion that est and powernow 841 * would. 842 */ 843 if (!(sc->sc_flags & (FLAGS_NOPSS | FLAGS_NOPCT))) { 844 printf("FVS, "); 845 for (i = 0; i < sc->sc_pss_len - 1; i++) 846 printf("%d, ", sc->sc_pss[i].pss_core_freq); 847 printf("%d MHz", sc->sc_pss[i].pss_core_freq); 848 } else 849 printf("PSS"); 850 } 851 852 printf("\n"); 853 } 854 855 int 856 acpicpu_getppc(struct acpicpu_softc *sc) 857 { 858 struct aml_value res; 859 860 sc->sc_ppc = 0; 861 862 if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PPC", 0, NULL, &res)) { 863 dnprintf(10, "%s: no _PPC\n", DEVNAME(sc)); 864 return (1); 865 } 866 867 sc->sc_ppc = aml_val2int(&res); 868 dnprintf(10, "%s: _PPC: %d\n", DEVNAME(sc), sc->sc_ppc); 869 aml_freevalue(&res); 870 871 return (0); 872 } 873 874 int 875 acpicpu_getpct(struct acpicpu_softc *sc) 876 { 877 struct aml_value res; 878 int rv = 1; 879 880 if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PCT", 0, NULL, &res)) { 881 dnprintf(20, "%s: no _PCT\n", DEVNAME(sc)); 882 return (1); 883 } 884 885 if (res.length != 2) { 886 dnprintf(20, "%s: %s: invalid _PCT length\n", DEVNAME(sc), 887 sc->sc_devnode->name); 888 return (1); 889 } 890 891 memcpy(&sc->sc_pct.pct_ctrl, res.v_package[0]->v_buffer, 892 sizeof sc->sc_pct.pct_ctrl); 893 if (sc->sc_pct.pct_ctrl.grd_gas.address_space_id == 894 GAS_FUNCTIONAL_FIXED) { 895 dnprintf(20, "CTRL GASIO is functional fixed hardware.\n"); 896 goto ffh; 897 } 898 899 memcpy(&sc->sc_pct.pct_status, res.v_package[1]->v_buffer, 900 sizeof sc->sc_pct.pct_status); 901 if (sc->sc_pct.pct_status.grd_gas.address_space_id == 902 GAS_FUNCTIONAL_FIXED) { 903 dnprintf(20, "CTRL GASIO is functional fixed hardware.\n"); 904 goto ffh; 905 } 906 907 dnprintf(10, "_PCT(ctrl) : %02x %04x %02x %02x %02x %02x %016llx\n", 908 sc->sc_pct.pct_ctrl.grd_descriptor, 909 sc->sc_pct.pct_ctrl.grd_length, 910 sc->sc_pct.pct_ctrl.grd_gas.address_space_id, 911 sc->sc_pct.pct_ctrl.grd_gas.register_bit_width, 912 sc->sc_pct.pct_ctrl.grd_gas.register_bit_offset, 913 sc->sc_pct.pct_ctrl.grd_gas.access_size, 914 sc->sc_pct.pct_ctrl.grd_gas.address); 915 916 dnprintf(10, "_PCT(status): %02x %04x %02x %02x %02x %02x %016llx\n", 917 sc->sc_pct.pct_status.grd_descriptor, 918 sc->sc_pct.pct_status.grd_length, 919 sc->sc_pct.pct_status.grd_gas.address_space_id, 920 sc->sc_pct.pct_status.grd_gas.register_bit_width, 921 sc->sc_pct.pct_status.grd_gas.register_bit_offset, 922 sc->sc_pct.pct_status.grd_gas.access_size, 923 sc->sc_pct.pct_status.grd_gas.address); 924 925 /* if not set assume single 32 bit access */ 926 sc->sc_pct_stat_as = sc->sc_pct.pct_status.grd_gas.register_bit_width 927 / 8; 928 if (sc->sc_pct_stat_as == 0) 929 sc->sc_pct_stat_as = 4; 930 sc->sc_pct_ctrl_as = sc->sc_pct.pct_ctrl.grd_gas.register_bit_width / 8; 931 if (sc->sc_pct_ctrl_as == 0) 932 sc->sc_pct_ctrl_as = 4; 933 sc->sc_pct_stat_len = sc->sc_pct.pct_status.grd_gas.access_size; 934 if (sc->sc_pct_stat_len == 0) 935 sc->sc_pct_stat_len = sc->sc_pct_stat_as; 936 sc->sc_pct_ctrl_len = sc->sc_pct.pct_ctrl.grd_gas.access_size; 937 if (sc->sc_pct_ctrl_len == 0) 938 sc->sc_pct_ctrl_len = sc->sc_pct_ctrl_as; 939 940 rv = 0; 941 ffh: 942 aml_freevalue(&res); 943 return (rv); 944 } 945 946 int 947 acpicpu_getpss(struct acpicpu_softc *sc) 948 { 949 struct aml_value res; 950 int i, c, cf; 951 952 if (aml_evalname(sc->sc_acpi, sc->sc_devnode, "_PSS", 0, NULL, &res)) { 953 dprintf("%s: no _PSS\n", DEVNAME(sc)); 954 return (1); 955 } 956 957 free(sc->sc_pss, M_DEVBUF, sc->sc_pssfulllen); 958 959 sc->sc_pss = mallocarray(res.length, sizeof(*sc->sc_pss), M_DEVBUF, 960 M_WAITOK | M_ZERO); 961 sc->sc_pssfulllen = res.length * sizeof(*sc->sc_pss); 962 963 c = 0; 964 for (i = 0; i < res.length; i++) { 965 cf = aml_val2int(res.v_package[i]->v_package[0]); 966 967 /* This heuristic comes from FreeBSDs 968 * dev/acpica/acpi_perf.c to weed out invalid PSS entries. 969 */ 970 if (cf == sc->sc_pss[c].pss_core_freq) { 971 printf("%s: struck PSS entry, core frequency equals " 972 " last\n", sc->sc_dev.dv_xname); 973 continue; 974 } 975 976 if (cf == 0xFFFF || cf == 0x9999 || cf == 99999 || cf == 0) { 977 printf("%s: struck PSS entry, inappropriate core " 978 "frequency value\n", sc->sc_dev.dv_xname); 979 continue; 980 } 981 982 sc->sc_pss[c].pss_core_freq = cf; 983 sc->sc_pss[c].pss_power = aml_val2int( 984 res.v_package[i]->v_package[1]); 985 sc->sc_pss[c].pss_trans_latency = aml_val2int( 986 res.v_package[i]->v_package[2]); 987 sc->sc_pss[c].pss_bus_latency = aml_val2int( 988 res.v_package[i]->v_package[3]); 989 sc->sc_pss[c].pss_ctrl = aml_val2int( 990 res.v_package[i]->v_package[4]); 991 sc->sc_pss[c].pss_status = aml_val2int( 992 res.v_package[i]->v_package[5]); 993 c++; 994 } 995 sc->sc_pss_len = c; 996 997 aml_freevalue(&res); 998 999 return (0); 1000 } 1001 1002 int 1003 acpicpu_fetch_pss(struct acpicpu_pss **pss) 1004 { 1005 struct acpicpu_softc *sc; 1006 1007 /* 1008 * XXX: According to the ACPI spec in an SMP system all processors 1009 * are supposed to support the same states. For now we pray 1010 * the bios ensures this... 1011 */ 1012 1013 sc = (struct acpicpu_softc *)cpu_info_primary.ci_acpicpudev; 1014 if (!sc) 1015 return 0; 1016 *pss = sc->sc_pss; 1017 1018 return (sc->sc_pss_len); 1019 } 1020 1021 int 1022 acpicpu_notify(struct aml_node *node, int notify_type, void *arg) 1023 { 1024 struct acpicpu_softc *sc = arg; 1025 1026 dnprintf(10, "acpicpu_notify: %.2x %s\n", notify_type, 1027 sc->sc_devnode->name); 1028 1029 switch (notify_type) { 1030 case 0x80: /* _PPC changed, retrieve new values */ 1031 acpicpu_getppc(sc); 1032 acpicpu_getpss(sc); 1033 if (sc->sc_notify) 1034 sc->sc_notify(sc->sc_pss, sc->sc_pss_len); 1035 break; 1036 1037 case 0x81: /* _CST changed, retrieve new values */ 1038 acpicpu_getcst(sc); 1039 printf("%s: notify", DEVNAME(sc)); 1040 acpicpu_print_cst(sc); 1041 printf("\n"); 1042 break; 1043 1044 default: 1045 printf("%s: unhandled cpu event %x\n", DEVNAME(sc), 1046 notify_type); 1047 break; 1048 } 1049 1050 return (0); 1051 } 1052 1053 void 1054 acpicpu_set_notify(void (*func)(struct acpicpu_pss *, int)) 1055 { 1056 struct acpicpu_softc *sc; 1057 1058 sc = (struct acpicpu_softc *)cpu_info_primary.ci_acpicpudev; 1059 if (sc != NULL) 1060 sc->sc_notify = func; 1061 } 1062 1063 void 1064 acpicpu_setperf_ppc_change(struct acpicpu_pss *pss, int npss) 1065 { 1066 struct acpicpu_softc *sc; 1067 1068 sc = (struct acpicpu_softc *)cpu_info_primary.ci_acpicpudev; 1069 1070 if (sc != NULL) 1071 cpu_setperf(sc->sc_level); 1072 } 1073 1074 void 1075 acpicpu_setperf(int level) 1076 { 1077 struct acpicpu_softc *sc; 1078 struct acpicpu_pss *pss = NULL; 1079 int idx, len; 1080 uint32_t status = 0; 1081 1082 sc = (struct acpicpu_softc *)curcpu()->ci_acpicpudev; 1083 1084 dnprintf(10, "%s: acpicpu setperf level %d\n", 1085 sc->sc_devnode->name, level); 1086 1087 if (level < 0 || level > 100) { 1088 dnprintf(10, "%s: acpicpu setperf illegal percentage\n", 1089 sc->sc_devnode->name); 1090 return; 1091 } 1092 1093 /* 1094 * XXX this should be handled more gracefully and it needs to also do 1095 * the duty cycle method instead of pss exclusively 1096 */ 1097 if (sc->sc_flags & FLAGS_NOPSS || sc->sc_flags & FLAGS_NOPCT) { 1098 dnprintf(10, "%s: acpicpu no _PSS or _PCT\n", 1099 sc->sc_devnode->name); 1100 return; 1101 } 1102 1103 if (sc->sc_ppc) 1104 len = sc->sc_ppc; 1105 else 1106 len = sc->sc_pss_len; 1107 idx = (len - 1) - (level / (100 / len)); 1108 if (idx < 0) 1109 idx = 0; 1110 1111 if (sc->sc_ppc) 1112 idx += sc->sc_pss_len - sc->sc_ppc; 1113 1114 if (idx > sc->sc_pss_len) 1115 idx = sc->sc_pss_len - 1; 1116 1117 dnprintf(10, "%s: acpicpu setperf index %d pss_len %d ppc %d\n", 1118 sc->sc_devnode->name, idx, sc->sc_pss_len, sc->sc_ppc); 1119 1120 pss = &sc->sc_pss[idx]; 1121 1122 #ifdef ACPI_DEBUG 1123 /* keep this for now since we will need this for debug in the field */ 1124 printf("0 status: %x %llx %u %u ctrl: %x %llx %u %u\n", 1125 sc->sc_pct.pct_status.grd_gas.address_space_id, 1126 sc->sc_pct.pct_status.grd_gas.address, 1127 sc->sc_pct_stat_as, sc->sc_pct_stat_len, 1128 sc->sc_pct.pct_ctrl.grd_gas.address_space_id, 1129 sc->sc_pct.pct_ctrl.grd_gas.address, 1130 sc->sc_pct_ctrl_as, sc->sc_pct_ctrl_len); 1131 #endif 1132 acpi_gasio(sc->sc_acpi, ACPI_IOREAD, 1133 sc->sc_pct.pct_status.grd_gas.address_space_id, 1134 sc->sc_pct.pct_status.grd_gas.address, sc->sc_pct_stat_as, 1135 sc->sc_pct_stat_len, &status); 1136 dnprintf(20, "1 status: %u <- %u\n", status, pss->pss_status); 1137 1138 /* Are we already at the requested frequency? */ 1139 if (status == pss->pss_status) 1140 return; 1141 1142 acpi_gasio(sc->sc_acpi, ACPI_IOWRITE, 1143 sc->sc_pct.pct_ctrl.grd_gas.address_space_id, 1144 sc->sc_pct.pct_ctrl.grd_gas.address, sc->sc_pct_ctrl_as, 1145 sc->sc_pct_ctrl_len, &pss->pss_ctrl); 1146 dnprintf(20, "pss_ctrl: %x\n", pss->pss_ctrl); 1147 1148 acpi_gasio(sc->sc_acpi, ACPI_IOREAD, 1149 sc->sc_pct.pct_status.grd_gas.address_space_id, 1150 sc->sc_pct.pct_status.grd_gas.address, sc->sc_pct_stat_as, 1151 sc->sc_pct_stat_as, &status); 1152 dnprintf(20, "2 status: %d\n", status); 1153 1154 /* Did the transition succeed? */ 1155 if (status == pss->pss_status) { 1156 cpuspeed = pss->pss_core_freq; 1157 sc->sc_level = level; 1158 } else 1159 printf("%s: acpicpu setperf failed to alter frequency\n", 1160 sc->sc_devnode->name); 1161 } 1162 1163 void 1164 acpicpu_idle(void) 1165 { 1166 struct cpu_info *ci = curcpu(); 1167 struct acpicpu_softc *sc = (struct acpicpu_softc *)ci->ci_acpicpudev; 1168 struct acpi_cstate *best, *cx; 1169 unsigned long itime; 1170 1171 if (sc == NULL) { 1172 __asm volatile("sti"); 1173 panic("null acpicpu"); 1174 } 1175 1176 /* possibly update the MWAIT_ONLY flag in cpu_info */ 1177 if (sc->sc_flags & FLAGS_MWAIT_ONLY) { 1178 if ((ci->ci_mwait & MWAIT_ONLY) == 0) 1179 atomic_setbits_int(&ci->ci_mwait, MWAIT_ONLY); 1180 } else if (ci->ci_mwait & MWAIT_ONLY) 1181 atomic_clearbits_int(&ci->ci_mwait, MWAIT_ONLY); 1182 1183 /* 1184 * Find the first state with a latency we'll accept, ignoring 1185 * states marked skippable 1186 */ 1187 best = cx = SLIST_FIRST(&sc->sc_cstates); 1188 while ((cx->flags & CST_FLAG_SKIP) || 1189 cx->latency * 3 > sc->sc_prev_sleep) { 1190 if ((cx = SLIST_NEXT(cx, link)) == NULL) 1191 break; 1192 best = cx; 1193 } 1194 1195 if (best->state >= 3 && 1196 (best->flags & CST_FLAG_MWAIT_BM_AVOIDANCE) && 1197 acpi_read_pmreg(acpi_softc, ACPIREG_PM1_STS, 0) & ACPI_PM1_BM_STS) { 1198 /* clear it and back off */ 1199 acpi_write_pmreg(acpi_softc, ACPIREG_PM1_STS, 0, 1200 ACPI_PM1_BM_STS); 1201 while ((cx = SLIST_NEXT(cx, link)) != NULL) { 1202 if (cx->flags & CST_FLAG_SKIP) 1203 continue; 1204 if (cx->state < 3 || 1205 (cx->flags & CST_FLAG_MWAIT_BM_AVOIDANCE) == 0) 1206 break; 1207 } 1208 best = cx; 1209 } 1210 1211 1212 atomic_inc_long(&cst_stats[best->state]); 1213 1214 itime = tick / 2; 1215 switch (best->method) { 1216 default: 1217 case CST_METH_HALT: 1218 __asm volatile("sti; hlt"); 1219 break; 1220 1221 case CST_METH_IO_HALT: 1222 inb((u_short)best->address); 1223 __asm volatile("sti; hlt"); 1224 break; 1225 1226 case CST_METH_MWAIT: 1227 { 1228 struct timeval start, stop; 1229 unsigned int hints; 1230 1231 #ifdef __LP64__ 1232 if ((read_rflags() & PSL_I) == 0) 1233 panic("idle with interrupts blocked!"); 1234 #else 1235 if ((read_eflags() & PSL_I) == 0) 1236 panic("idle with interrupts blocked!"); 1237 #endif 1238 1239 /* something already queued? */ 1240 if (!cpu_is_idle(ci)) 1241 return; 1242 1243 /* 1244 * About to idle; setting the MWAIT_IN_IDLE bit tells 1245 * cpu_unidle() that it can't be a no-op and tells cpu_kick() 1246 * that it doesn't need to use an IPI. We also set the 1247 * MWAIT_KEEP_IDLING bit: those routines clear it to stop 1248 * the mwait. Once they're set, we do a final check of the 1249 * queue, in case another cpu called setrunqueue() and added 1250 * something to the queue and called cpu_unidle() between 1251 * the check in sched_idle() and here. 1252 */ 1253 hints = (unsigned)best->address; 1254 microuptime(&start); 1255 atomic_setbits_int(&ci->ci_mwait, MWAIT_IDLING); 1256 if (cpu_is_idle(ci)) { 1257 /* intel errata AAI65: cflush before monitor */ 1258 if (ci->ci_cflushsz != 0 && 1259 strcmp(cpu_vendor, "GenuineIntel") == 0) { 1260 membar_sync(); 1261 clflush((unsigned long)&ci->ci_mwait); 1262 membar_sync(); 1263 } 1264 1265 monitor(&ci->ci_mwait, 0, 0); 1266 if ((ci->ci_mwait & MWAIT_IDLING) == MWAIT_IDLING) 1267 mwait(0, hints); 1268 } 1269 1270 microuptime(&stop); 1271 timersub(&stop, &start, &stop); 1272 itime = stop.tv_sec * 1000000 + stop.tv_usec; 1273 1274 /* done idling; let cpu_kick() know that an IPI is required */ 1275 atomic_clearbits_int(&ci->ci_mwait, MWAIT_IDLING); 1276 break; 1277 } 1278 1279 case CST_METH_GAS_IO: 1280 inb((u_short)best->address); 1281 /* something harmless to give system time to change state */ 1282 acpi_read_pmreg(acpi_softc, ACPIREG_PM1_STS, 0); 1283 break; 1284 1285 } 1286 1287 sc->sc_last_itime = itime; 1288 itime >>= 1; 1289 sc->sc_prev_sleep = (sc->sc_prev_sleep + (sc->sc_prev_sleep >> 1) 1290 + itime) >> 1; 1291 } 1292 1293 void 1294 acpicpu_suspend(void) 1295 { 1296 extern int cpu_suspended; 1297 struct cpu_info *ci = curcpu(); 1298 struct acpicpu_softc *sc = (struct acpicpu_softc *)ci->ci_acpicpudev; 1299 struct acpi_cstate *best, *cx; 1300 1301 if (sc == NULL) { 1302 __asm volatile("sti"); 1303 panic("null acpicpu"); 1304 } 1305 1306 /* 1307 * Find the lowest usable state. 1308 */ 1309 best = cx = SLIST_FIRST(&sc->sc_cstates); 1310 while ((cx->flags & CST_FLAG_SKIP)) { 1311 if ((cx = SLIST_NEXT(cx, link)) == NULL) 1312 break; 1313 best = cx; 1314 } 1315 1316 switch (best->method) { 1317 default: 1318 case CST_METH_HALT: 1319 __asm volatile("sti; hlt"); 1320 break; 1321 1322 case CST_METH_IO_HALT: 1323 inb((u_short)best->address); 1324 __asm volatile("sti; hlt"); 1325 break; 1326 1327 case CST_METH_MWAIT: 1328 { 1329 unsigned int hints; 1330 1331 hints = (unsigned)best->address; 1332 /* intel errata AAI65: cflush before monitor */ 1333 if (ci->ci_cflushsz != 0 && 1334 strcmp(cpu_vendor, "GenuineIntel") == 0) { 1335 membar_sync(); 1336 clflush((unsigned long)&cpu_suspended); 1337 membar_sync(); 1338 } 1339 1340 monitor(&cpu_suspended, 0, 0); 1341 if (cpu_suspended || !CPU_IS_PRIMARY(ci)) 1342 mwait(0, hints); 1343 1344 break; 1345 } 1346 1347 case CST_METH_GAS_IO: 1348 inb((u_short)best->address); 1349 /* something harmless to give system time to change state */ 1350 acpi_read_pmreg(acpi_softc, ACPIREG_PM1_STS, 0); 1351 break; 1352 1353 } 1354 } 1355