/* $NetBSD: acpi_cpu_cstate.c,v 1.58 2011/10/13 05:20:45 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.58 2011/10/13 05:20:45 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/timetc.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>
#include <dev/acpi/acpi_timer.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_cstate")

static ACPI_STATUS	 acpicpu_cstate_cst(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_cstate_cst_add(struct acpicpu_softc *,
						ACPI_OBJECT *);
static void		 acpicpu_cstate_cst_bios(void);
static void		 acpicpu_cstate_memset(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_cstate_dep(struct acpicpu_softc *);
static void		 acpicpu_cstate_fadt(struct acpicpu_softc *);
static void		 acpicpu_cstate_quirks(struct acpicpu_softc *);
static int		 acpicpu_cstate_latency(struct acpicpu_softc *);
static bool		 acpicpu_cstate_bm_check(void);
static void		 acpicpu_cstate_idle_enter(struct acpicpu_softc *, int);

extern struct acpicpu_softc **acpicpu_sc;

/*
 * XXX: The local APIC timer (as well as the TSC) is typically stopped in C3.
 *	For now, we have no option but to disable C3. There also appear to
 *	be timer-related interrupt issues in C2. The only entirely safe
 *	option at the moment is to use C1.
 */
#ifdef ACPICPU_ENABLE_C3
static int cs_state_max = ACPI_STATE_C3;
#else
static int cs_state_max = ACPI_STATE_C1;
#endif
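
/*
 * Note: ACPICPU_ENABLE_C3 is a compile-time knob. On NetBSD it would
 * typically come from the kernel configuration file, e.g. via
 * "options ACPICPU_ENABLE_C3" -- assuming the option is wired into
 * the build; see the comment above for why C3 is unsafe by default.
 */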

void
acpicpu_cstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	ACPI_STATUS rv;

	/*
	 * Either use the preferred _CST or resort to FADT.
	 */
	rv = acpicpu_cstate_cst(sc);

	switch (rv) {

	case AE_OK:
		acpicpu_cstate_cst_bios();
		break;

	default:
		sc->sc_flags |= ACPICPU_FLAG_C_FADT;
		acpicpu_cstate_fadt(sc);
		break;
	}

	/*
	 * Query the optional _CSD.
	 */
	rv = acpicpu_cstate_dep(sc);

	if (ACPI_SUCCESS(rv))
		sc->sc_flags |= ACPICPU_FLAG_C_DEP;

	sc->sc_flags |= ACPICPU_FLAG_C;

	acpicpu_cstate_quirks(sc);
}

void
acpicpu_cstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
		return;

	(void)acpicpu_md_cstate_stop();

	sc->sc_flags &= ~ACPICPU_FLAG_C;
}

void
acpicpu_cstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	(void)acpicpu_md_cstate_start(sc);
}

void
acpicpu_cstate_suspend(void *aux)
{
	/* Nothing. */
}

void
acpicpu_cstate_resume(void *aux)
{
	acpicpu_cstate_callback(aux);
}

void
acpicpu_cstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0)
		return;

	mutex_enter(&sc->sc_mtx);
	(void)acpicpu_cstate_cst(sc);
	mutex_exit(&sc->sc_mtx);
}

static ACPI_STATUS
acpicpu_cstate_cst(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i, n;
	uint8_t count;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count < 2) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = obj->Package.Elements;

	if (elm[0].Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm[0].Integer.Value;

	if (n != obj->Package.Count - 1) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	if (n > ACPI_C_STATES_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	acpicpu_cstate_memset(sc);

	/*
	 * All x86 processors should support C1 (a.k.a. HALT).
	 */
	cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

	CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
	CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);

	for (count = 0, i = 1; i <= n; i++) {

		elm = &obj->Package.Elements[i];
		rv = acpicpu_cstate_cst_add(sc, elm);

		if (ACPI_SUCCESS(rv))
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
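
/*
 * For reference, a sketch of the _CST layout parsed above. This is an
 * illustrative ASL fragment, not taken from any particular firmware:
 * the leading integer gives the number of entries, and each entry
 * packs a Register() buffer with the type, latency (us) and power (mW):
 *
 *	Name (_CST, Package () {
 *		2,
 *		Package () {	// C1 via FFixedHW (native, e.g. MWAIT)
 *			ResourceTemplate () {
 *				Register (FFixedHW, 0x01, 0x02, 0x00)
 *			},
 *			1, 1, 1000
 *		},
 *		Package () {	// C2 via an 8-bit system I/O read
 *			ResourceTemplate () {
 *				Register (SystemIO, 0x08, 0x00, 0x1014)
 *			},
 *			2, 50, 500
 *		}
 *	})
 *
 * The I/O address 0x1014 is made up; the 8-bit register width is the
 * one acpicpu_cstate_cst_add() insists on for the SystemIO case.
 */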

static ACPI_STATUS
acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;
	struct acpicpu_cstate state;
	struct acpicpu_reg *reg;
	ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT *obj;
	uint32_t type;

	(void)memset(&state, 0, sizeof(state));

	state.cs_flags = ACPICPU_FLAG_C_BM_STS;

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (elm->Package.Count != 4) {
		rv = AE_LIMIT;
		goto out;
	}

	/*
	 * Type.
	 */
	obj = &elm->Package.Elements[1];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	type = obj->Integer.Value;

	if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
		rv = AE_TYPE;
		goto out;
	}

	/*
	 * Latency.
	 */
	obj = &elm->Package.Elements[2];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_latency = obj->Integer.Value;

	/*
	 * Power.
	 */
	obj = &elm->Package.Elements[3];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_power = obj->Integer.Value;

	/*
	 * Register.
	 */
	obj = &elm->Package.Elements[0];

	if (obj->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	CTASSERT(sizeof(struct acpicpu_reg) == 15);

	if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
		rv = AE_LIMIT;
		goto out;
	}

	reg = (struct acpicpu_reg *)obj->Buffer.Pointer;

	switch (reg->reg_spaceid) {

	case ACPI_ADR_SPACE_SYSTEM_IO:
		state.cs_method = ACPICPU_C_STATE_SYSIO;

		if (reg->reg_addr == 0) {
			rv = AE_AML_ILLEGAL_ADDRESS;
			goto out;
		}

		if (reg->reg_bitwidth != 8) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		state.cs_addr = reg->reg_addr;
		break;

	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		state.cs_method = ACPICPU_C_STATE_FFH;

		switch (type) {

		case ACPI_STATE_C1:

			/*
			 * If ACPI wants native access (FFH), but the
			 * MD code does not support MONITOR/MWAIT, use
			 * HLT for C1 and error out for higher C-states.
			 */
			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0)
				state.cs_method = ACPICPU_C_STATE_HALT;

			break;

		default:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}
		}

		if (sc->sc_cap != 0) {

			/*
			 * The _CST FFH GAS encoding may contain
			 * additional hints on Intel processors.
			 * Use these to determine whether we can
			 * avoid the bus master activity check.
			 */
			if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0)
				state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
		}

		break;

	default:
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	cs[type].cs_addr = state.cs_addr;
	cs[type].cs_power = state.cs_power;
	cs[type].cs_flags = state.cs_flags;
	cs[type].cs_method = state.cs_method;
	cs[type].cs_latency = state.cs_latency;

out:
	if (ACPI_FAILURE(rv))
		aprint_error_dev(sc->sc_dev, "failed to add "
		    "C-state: %s\n", AcpiFormatException(rv));

	return rv;
}
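
/*
 * A note on the Register() buffer validated above: the 15-byte
 * acpicpu_reg presumably mirrors the AML generic register descriptor,
 * i.e. a one-byte descriptor type, a two-byte resource length, and
 * then the GAS fields consumed above:
 *
 *	offset	field (struct acpicpu_reg)
 *	0	reg_desc	(descriptor type)
 *	1	reg_reslen	(resource length, 16-bit)
 *	3	reg_spaceid
 *	4	reg_bitwidth
 *	5	reg_bitoffset
 *	6	reg_accesssize
 *	7	reg_addr	(64-bit)
 *
 * This layout is inferred from the field names and the CTASSERT; the
 * authoritative definition lives in acpi_cpu.h.
 */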

static void
acpicpu_cstate_cst_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.CstControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

static void
acpicpu_cstate_memset(struct acpicpu_softc *sc)
{
	uint8_t i = 0;

	while (i < __arraycount(sc->sc_cstate)) {

		sc->sc_cstate[i].cs_addr = 0;
		sc->sc_cstate[i].cs_power = 0;
		sc->sc_cstate[i].cs_flags = 0;
		sc->sc_cstate[i].cs_method = 0;
		sc->sc_cstate[i].cs_latency = 0;

		i++;
	}
}

static ACPI_STATUS
acpicpu_cstate_dep(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t val;
	uint8_t i, n;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CSD", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 1) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = &obj->Package.Elements[0];

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm->Package.Count;

	if (n != 6) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = elm->Package.Elements;

	for (i = 0; i < n; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER) {
			rv = AE_TYPE;
			goto out;
		}

		if (elm[i].Integer.Value > UINT32_MAX) {
			rv = AE_AML_NUMERIC_OVERFLOW;
			goto out;
		}
	}

	val = elm[1].Integer.Value;

	if (val != 0)
		aprint_debug_dev(sc->sc_dev, "invalid revision in _CSD\n");

	val = elm[3].Integer.Value;

	if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
		rv = AE_AML_BAD_RESOURCE_VALUE;
		goto out;
	}

	val = elm[4].Integer.Value;

	if (val > sc->sc_ncpus) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	sc->sc_cstate_dep.dep_domain = elm[2].Integer.Value;
	sc->sc_cstate_dep.dep_type = elm[3].Integer.Value;
	sc->sc_cstate_dep.dep_ncpus = elm[4].Integer.Value;
	sc->sc_cstate_dep.dep_index = elm[5].Integer.Value;

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_debug_dev(sc->sc_dev, "failed to evaluate "
		    "_CSD: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}
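
/*
 * Again for illustration only: a _CSD placing two logical CPUs in the
 * same hardware-coordinated C-state domain might read roughly as
 * follows in ASL (0xFE denoting "HW_ALL" coordination):
 *
 *	Name (_CSD, Package () {
 *		Package () { 6, 0, 0, 0xFE, 2, 0 }
 *	})
 *
 * The six integers map to the fields parsed above: the number of
 * entries, the revision, the domain, the coordination type, the
 * number of processors in the domain, and the C-state index.
 */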

static void
acpicpu_cstate_fadt(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;

	acpicpu_cstate_memset(sc);

	/*
	 * All x86 processors should support C1 (a.k.a. HALT).
	 */
	cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

	if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) == 0)
		aprint_debug_dev(sc->sc_dev, "HALT not supported?\n");

	if (sc->sc_object.ao_pblkaddr == 0)
		return;

	if (sc->sc_ncpus > 1) {

		if ((AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
			return;
	}

	cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
	cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;

	cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
	cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;

	cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
	cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;

	/*
	 * The P_BLK length should always be 6. If it
	 * is not, reduce functionality accordingly.
	 */
	if (sc->sc_object.ao_pblklen < 5)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (sc->sc_object.ao_pblklen < 6)
		cs[ACPI_STATE_C3].cs_method = 0;

	/*
	 * Sanity check the latency levels in the FADT. Values above
	 * the thresholds may be used to indicate that C2 and C3 are
	 * not supported -- AMD family 11h is an example; see:
	 *
	 *	Advanced Micro Devices: BIOS and Kernel Developer's
	 *	Guide (BKDG) for AMD Family 11h Processors. Section
	 *	2.4.3, Revision 3.00, July 2008.
	 */
	CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
	CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);

	if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
		cs[ACPI_STATE_C3].cs_method = 0;
}

static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;

	/*
	 * Disable C3 for PIIX4.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_PIIX4) != 0) {
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
		return;
	}

	/*
	 * Check bus master arbitration. If ARB_DIS
	 * is not available, processor caches must be
	 * flushed before C3 (ACPI 4.0, section 8.2).
	 */
	if (reg != 0 && len != 0) {
		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
		return;
	}

	/*
	 * Disable C3 entirely if WBINVD is not present.
	 */
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
	else {
		/*
		 * If WBINVD is present and functioning properly,
		 * flush all processor caches before entering C3.
		 */
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
			sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
		else
			sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
	}
}

static int
acpicpu_cstate_latency(struct acpicpu_softc *sc)
{
	static const uint32_t cs_factor = 3;
	struct acpicpu_cstate *cs;
	int i;

	KASSERT(mutex_owned(&sc->sc_mtx) != 0);

	for (i = cs_state_max; i > 0; i--) {

		cs = &sc->sc_cstate[i];

		if (__predict_false(cs->cs_method == 0))
			continue;

		/*
		 * Choose a state if we have previously slept
		 * longer than the worst case latency of the
		 * state times an arbitrary multiplier.
		 */
		if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
			return i;
	}

	return ACPI_STATE_C1;
}
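
/*
 * A worked example of the heuristic above: with cs_factor fixed at 3,
 * a C3 state with a worst-case latency of 100 us is chosen only if
 * the previously measured sleep (sc_cstate_sleep, in microseconds)
 * exceeded 300 us; otherwise the loop falls through to C2 and,
 * failing that too, to the always-available C1.
 */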

/*
 * The main idle loop.
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_acpiid < maxcpus);

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		return;

	KASSERT(ci->ci_ilevel == IPL_NONE);
	KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);

	if (__predict_false(sc->sc_cold != false))
		return;

	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
		return;

	state = acpicpu_cstate_latency(sc);
	mutex_exit(&sc->sc_mtx);

	/*
	 * Apply the AMD C1E quirk.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0)
		acpicpu_md_quirk_c1e();

	/*
	 * Check for bus master activity. Note that usb(4) in
	 * particular causes high activity, which may prevent
	 * the use of C3 states.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		if (acpicpu_cstate_bm_check() != false)
			state--;

		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs the caches are shared
	 * between CPUs, and bus master control is required to
	 * keep them coherent while in C3. Flushing the CPU
	 * caches is only the last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
}

static void
acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
{
	struct acpicpu_cstate *cs = &sc->sc_cstate[state];
	uint32_t end, start, val;

	start = acpitimer_read_fast(NULL);

	switch (cs->cs_method) {

	case ACPICPU_C_STATE_FFH:
	case ACPICPU_C_STATE_HALT:
		acpicpu_md_cstate_enter(cs->cs_method, state);
		break;

	case ACPICPU_C_STATE_SYSIO:
		(void)AcpiOsReadPort(cs->cs_addr, &val, 8);
		break;
	}

	cs->cs_evcnt.ev_count++;
	end = acpitimer_read_fast(NULL);
	sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;
}

static bool
acpicpu_cstate_bm_check(void)
{
	uint32_t val = 0;
	ACPI_STATUS rv;

	rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);

	if (ACPI_FAILURE(rv) || val == 0)
		return false;

	(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);

	return true;
}
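
/*
 * Usage sketch, not part of this file: on x86 the MD counterpart,
 * acpicpu_md_cstate_start(), is expected to install the loop above
 * as the idle handler, presumably along the lines of
 *
 *	x86_cpu_idle_set(acpicpu_cstate_idle, "acpi", true);
 *
 * after which each idle CPU enters the deepest C-state permitted by
 * acpicpu_cstate_latency() and the bus master checks above.
 */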