/* $NetBSD: acpi_cpu_cstate.c,v 1.52 2011/03/19 12:57:31 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.52 2011/03/19 12:57:31 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/timetc.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>
#include <dev/acpi/acpi_timer.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_cstate")

static ACPI_STATUS	acpicpu_cstate_cst(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cstate_cst_add(struct acpicpu_softc *,
					       ACPI_OBJECT *, int);
static void		acpicpu_cstate_cst_bios(void);
static void		acpicpu_cstate_memset(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cstate_dep(struct acpicpu_softc *);
static void		acpicpu_cstate_fadt(struct acpicpu_softc *);
static void		acpicpu_cstate_quirks(struct acpicpu_softc *);
static int		acpicpu_cstate_latency(struct acpicpu_softc *);
static bool		acpicpu_cstate_bm_check(void);
static void		acpicpu_cstate_idle_enter(struct acpicpu_softc *, int);

extern struct acpicpu_softc **acpicpu_sc;

/*
 * XXX: The local APIC timer (as well as TSC) is typically stopped in C3.
 *	For now, we cannot but disable C3. But there appears to be timer-
 *	related interrupt issues also in C2. The only entirely safe option
 *	at the moment is to use C1.
 */
#ifdef ACPICPU_ENABLE_C3
static int cs_state_max = ACPI_STATE_C3;
#else
static int cs_state_max = ACPI_STATE_C1;
#endif

void
acpicpu_cstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	ACPI_STATUS rv;

	/*
	 * Either use the preferred _CST or resort to FADT.
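	 *
	 * _CST describes each available C-state in full (entry
	 * method, type, latency and power), whereas the FADT
	 * fallback can only derive C2 and C3 from the P_BLK
	 * registers and the FADT latency fields; see
	 * acpicpu_cstate_fadt() below.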
	 */
	rv = acpicpu_cstate_cst(sc);

	switch (rv) {

	case AE_OK:
		acpicpu_cstate_cst_bios();
		break;

	default:
		sc->sc_flags |= ACPICPU_FLAG_C_FADT;
		acpicpu_cstate_fadt(sc);
		break;
	}

	/*
	 * Query the optional _CSD.
	 */
	rv = acpicpu_cstate_dep(sc);

	if (ACPI_SUCCESS(rv))
		sc->sc_flags |= ACPICPU_FLAG_C_DEP;

	sc->sc_flags |= ACPICPU_FLAG_C;

	acpicpu_cstate_quirks(sc);
}

int
acpicpu_cstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	int rv;

	rv = RUN_ONCE(&once_detach, acpicpu_md_cstate_stop);

	if (rv != 0)
		return rv;

	sc->sc_flags &= ~ACPICPU_FLAG_C;

	return 0;
}

void
acpicpu_cstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	(void)acpicpu_md_cstate_start(sc);
}

void
acpicpu_cstate_suspend(void *aux)
{
	/* Nothing. */
}

void
acpicpu_cstate_resume(void *aux)
{
	acpicpu_cstate_callback(aux);
}

void
acpicpu_cstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0)
		return;

	mutex_enter(&sc->sc_mtx);
	(void)acpicpu_cstate_cst(sc);
	mutex_exit(&sc->sc_mtx);
}

static ACPI_STATUS
acpicpu_cstate_cst(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i, n;
	uint8_t count;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count < 2) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = obj->Package.Elements;

	if (elm[0].Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm[0].Integer.Value;

	if (n != obj->Package.Count - 1) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	if (n > ACPI_C_STATES_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	acpicpu_cstate_memset(sc);

	CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
	CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);

	for (count = 0, i = 1; i <= n; i++) {

		elm = &obj->Package.Elements[i];
		rv = acpicpu_cstate_cst_add(sc, elm, i);

		if (ACPI_SUCCESS(rv))
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static ACPI_STATUS
acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm, int i)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;
	struct acpicpu_cstate state;
	struct acpicpu_reg *reg;
	ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT *obj;
	uint32_t type;

	(void)memset(&state, 0, sizeof(state));

	state.cs_flags = ACPICPU_FLAG_C_BM_STS;

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (elm->Package.Count != 4) {
		rv = AE_LIMIT;
		goto out;
	}

	/*
	 * Type.
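	 *
	 * Each _CST entry is expected to be a four-element
	 * package (cf. ACPI 4.0, section 8.4.2.1):
	 *
	 *	Package () {
	 *		Register,	// Buffer (GAS): entry method
	 *		Type,		// Integer: 1 = C1, 2 = C2, 3 = C3
	 *		Latency,	// Integer: worst-case latency, us
	 *		Power		// Integer: average power, mW
	 *	}
	 *
	 * The type is validated first; the register buffer is
	 * examined last, as it determines the entry method.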
	 */
	obj = &elm->Package.Elements[1];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	type = obj->Integer.Value;

	if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
		rv = AE_TYPE;
		goto out;
	}

	/*
	 * Latency.
	 */
	obj = &elm->Package.Elements[2];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_latency = obj->Integer.Value;

	/*
	 * Power.
	 */
	obj = &elm->Package.Elements[3];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_power = obj->Integer.Value;

	/*
	 * Register.
	 */
	obj = &elm->Package.Elements[0];

	if (obj->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	CTASSERT(sizeof(struct acpicpu_reg) == 15);

	if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
		rv = AE_LIMIT;
		goto out;
	}

	reg = (struct acpicpu_reg *)obj->Buffer.Pointer;

	switch (reg->reg_spaceid) {

	case ACPI_ADR_SPACE_SYSTEM_IO:
		state.cs_method = ACPICPU_C_STATE_SYSIO;

		if (reg->reg_addr == 0) {
			rv = AE_AML_ILLEGAL_ADDRESS;
			goto out;
		}

		if (reg->reg_bitwidth != 8) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		state.cs_addr = reg->reg_addr;
		break;

	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		state.cs_method = ACPICPU_C_STATE_FFH;

		switch (type) {

		case ACPI_STATE_C1:

			/*
			 * If ACPI wants native access (FFH), but the
			 * MD code does not support MONITOR/MWAIT, use
			 * HLT for C1 and error out for higher C-states.
			 */
			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0)
				state.cs_method = ACPICPU_C_STATE_HALT;

			break;

		default:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}
		}

		if (sc->sc_cap != 0) {

			/*
			 * The _CST FFH GAS encoding may contain
			 * additional hints on Intel processors.
			 * Use these to determine whether we can
			 * avoid the bus master activity check.
			 */
			if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) != 0)
				state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
		}

		break;

	default:
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	/*
	 * As some systems define the type arbitrarily,
	 * we use a sequential counter instead of the
	 * BIOS data. For instance, AMD family 14h is
	 * instructed to only use the value 2; see
	 *
	 *	Advanced Micro Devices: BIOS and Kernel
	 *	Developer's Guide (BKDG) for AMD Family
	 *	14h Models 00h-0Fh Processors. Revision
	 *	3.00, January 4, 2011.
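	 *
	 * A mismatch is therefore only logged below, and the
	 * state is stored at the index given by the sequential
	 * counter rather than by the reported type.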
	 */
	if (i != (int)type) {

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
		    "C%d != C%u from BIOS", i, type));
	}

	KASSERT(cs[i].cs_method == 0);

	cs[i].cs_addr = state.cs_addr;
	cs[i].cs_power = state.cs_power;
	cs[i].cs_flags = state.cs_flags;
	cs[i].cs_method = state.cs_method;
	cs[i].cs_latency = state.cs_latency;

out:
	if (ACPI_FAILURE(rv))
		aprint_error_dev(sc->sc_dev, "failed to add "
		    "C-state: %s\n", AcpiFormatException(rv));

	return rv;
}

static void
acpicpu_cstate_cst_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.CstControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

static void
acpicpu_cstate_memset(struct acpicpu_softc *sc)
{
	uint8_t i = 0;

	while (i < __arraycount(sc->sc_cstate)) {

		sc->sc_cstate[i].cs_addr = 0;
		sc->sc_cstate[i].cs_power = 0;
		sc->sc_cstate[i].cs_flags = 0;
		sc->sc_cstate[i].cs_method = 0;
		sc->sc_cstate[i].cs_latency = 0;

		i++;
	}
}

static ACPI_STATUS
acpicpu_cstate_dep(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t val;
	uint8_t i, n;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CSD", &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 1) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = &obj->Package.Elements[0];

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm->Package.Count;

	if (n != 6) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = elm->Package.Elements;

	for (i = 0; i < n; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER) {
			rv = AE_TYPE;
			goto out;
		}

		if (elm[i].Integer.Value > UINT32_MAX) {
			rv = AE_AML_NUMERIC_OVERFLOW;
			goto out;
		}
	}

	val = elm[1].Integer.Value;

	if (val != 0)
		aprint_debug_dev(sc->sc_dev, "invalid revision in _CSD\n");

	val = elm[3].Integer.Value;

	if (val < ACPICPU_DEP_SW_ALL || val > ACPICPU_DEP_HW_ALL) {
		rv = AE_AML_BAD_RESOURCE_VALUE;
		goto out;
	}

	val = elm[4].Integer.Value;

	if (val > sc->sc_ncpus) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	sc->sc_cstate_dep.dep_domain = elm[2].Integer.Value;
	sc->sc_cstate_dep.dep_type = elm[3].Integer.Value;
	sc->sc_cstate_dep.dep_ncpus = elm[4].Integer.Value;
	sc->sc_cstate_dep.dep_index = elm[5].Integer.Value;

out:
	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_debug_dev(sc->sc_dev, "failed to evaluate "
		    "_CSD: %s\n", AcpiFormatException(rv));

	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static void
acpicpu_cstate_fadt(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;

	acpicpu_cstate_memset(sc);

	/*
	 * All x86 processors should support C1 (a.k.a. HALT).
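	 *
	 * In this legacy model C2 and C3 are entered by reading
	 * the P_LVL2 and P_LVL3 registers, which live at fixed
	 * offsets in the processor block (P_BLK):
	 *
	 *	offset	size	register
	 *	0	4	P_CNT  (clock throttling)
	 *	4	1	P_LVL2 (enter C2)
	 *	5	1	P_LVL3 (enter C3)
	 *
	 * Hence the address offsets and the P_BLK length checks
	 * below.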
	 */
	cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

	if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) == 0)
		aprint_debug_dev(sc->sc_dev, "HALT not supported?\n");

	if (sc->sc_object.ao_pblkaddr == 0)
		return;

	if (sc->sc_ncpus > 1) {

		if ((AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
			return;
	}

	cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
	cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;

	cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
	cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;

	cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
	cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;

	/*
	 * The P_BLK length should always be 6. If it
	 * is not, reduce functionality accordingly.
	 */
	if (sc->sc_object.ao_pblklen < 5)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (sc->sc_object.ao_pblklen < 6)
		cs[ACPI_STATE_C3].cs_method = 0;

	/*
	 * Sanity check the latency levels in FADT.
	 * Values above the thresholds are used to
	 * inform that C-states are not supported.
	 */
	CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
	CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);

	if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
		cs[ACPI_STATE_C3].cs_method = 0;
}

static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;

	/*
	 * Disable C3 for PIIX4.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_PIIX4) != 0) {
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
		return;
	}

	/*
	 * Check bus master arbitration. If ARB_DIS
	 * is not available, processor caches must be
	 * flushed before C3 (ACPI 4.0, section 8.2).
	 */
	if (reg != 0 && len != 0) {
		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
		return;
	}

	/*
	 * Disable C3 entirely if WBINVD is not present.
	 */
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
	else {
		/*
		 * If WBINVD is present and functioning properly,
		 * flush all processor caches before entering C3.
		 */
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
			sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
		else
			sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
	}
}

static int
acpicpu_cstate_latency(struct acpicpu_softc *sc)
{
	static const uint32_t cs_factor = 3;
	struct acpicpu_cstate *cs;
	int i;

	for (i = cs_state_max; i > 0; i--) {

		cs = &sc->sc_cstate[i];

		if (__predict_false(cs->cs_method == 0))
			continue;

		/*
		 * Choose a state if we have previously slept
		 * longer than the worst case latency of the
		 * state times an arbitrary multiplier.
		 */
		if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
			return i;
	}

	return ACPI_STATE_C1;
}

/*
 * The main idle loop.
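 *
 * Interrupts are disabled on entry and re-enabled either by
 * acpicpu_cstate_idle_enter() after the wakeup or at the "out"
 * label on the early-exit paths.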
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	acpi_md_OsDisableInterrupt();

	if (__predict_false(ci->ci_want_resched != 0))
		goto out;

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_acpiid < maxcpus);

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		goto out;

	KASSERT(ci->ci_ilevel == IPL_NONE);
	KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);

	if (__predict_false(sc->sc_cold != false))
		goto out;

	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
		goto out;

	state = acpicpu_cstate_latency(sc);
	mutex_exit(&sc->sc_mtx);

	/*
	 * Apply AMD C1E quirk.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0)
		acpicpu_md_quirk_c1e();

	/*
	 * Check for bus master activity. Note that particularly usb(4)
	 * causes high activity, which may prevent the use of C3 states.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		if (acpicpu_cstate_bm_check() != false)
			state--;

		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs caches are shared
	 * by CPUs and bus master control is required to
	 * keep these coherent while in C3. Flushing the
	 * CPU caches is only the last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
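	 *
	 * This undoes the BM_RLD and ARB_DIS setup done before
	 * the C3 entry above, now that the CPU is back in C0.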
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

	return;

out:
	acpi_md_OsEnableInterrupt();
}

static void
acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
{
	struct acpicpu_cstate *cs = &sc->sc_cstate[state];
	uint32_t end, start, val;

	start = acpitimer_read_fast(NULL);

	switch (cs->cs_method) {

	case ACPICPU_C_STATE_FFH:
	case ACPICPU_C_STATE_HALT:
		acpicpu_md_cstate_enter(cs->cs_method, state);
		break;

	case ACPICPU_C_STATE_SYSIO:
		(void)AcpiOsReadPort(cs->cs_addr, &val, 8);
		break;
	}

	acpi_md_OsEnableInterrupt();

	cs->cs_evcnt.ev_count++;
	end = acpitimer_read_fast(NULL);
	sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;
}

static bool
acpicpu_cstate_bm_check(void)
{
	uint32_t val = 0;
	ACPI_STATUS rv;

	rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);

	if (ACPI_FAILURE(rv) || val == 0)
		return false;

	(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);

	return true;
}
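
/*
 * Note: acpicpu_cstate_bm_check() clears BM_STS by writing 1 to it,
 * as the PM1 status bits are write-one-to-clear. Each check thus
 * observes only bus master activity since the previous invocation.
 */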