/* $NetBSD: acpi_cpu_pstate.c,v 1.36 2010/12/30 12:05:02 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.36 2010/12/30 12:05:02 jruoho Exp $");

#include <sys/param.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/once.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu_pstate")

static void		acpicpu_pstate_attach_print(struct acpicpu_softc *);
static void		acpicpu_pstate_attach_evcnt(struct acpicpu_softc *);
static void		acpicpu_pstate_detach_evcnt(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_pss(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_pss_add(struct acpicpu_pstate *,
					       ACPI_OBJECT *);
static ACPI_STATUS	acpicpu_pstate_xpss(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_pstate_xpss_add(struct acpicpu_pstate *,
						ACPI_OBJECT *);
static ACPI_STATUS	acpicpu_pstate_pct(struct acpicpu_softc *);
static int		acpicpu_pstate_max(struct acpicpu_softc *);
static int		acpicpu_pstate_min(struct acpicpu_softc *);
static void		acpicpu_pstate_change(struct acpicpu_softc *);
static void		acpicpu_pstate_reset(struct acpicpu_softc *);
static void		acpicpu_pstate_bios(void);

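/*
 * The P-state (frequency, in MHz) that was active when entering
 * a system sleep state; restored by acpicpu_pstate_resume().
 */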
static uint32_t		acpicpu_pstate_saved = 0;

void
acpicpu_pstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	const char *str;
	ACPI_HANDLE tmp;
	ACPI_STATUS rv;

	rv = acpicpu_pstate_pss(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PSS";
		goto fail;
	}

	/*
	 * Append additional information from the
	 * extended _PSS, if available. Note that
	 * XPSS can not be used on Intel systems
	 * that use either _PDC or _OSC.
	 */
	if (sc->sc_cap == 0) {

		rv = acpicpu_pstate_xpss(sc);

		if (ACPI_SUCCESS(rv))
			sc->sc_flags |= ACPICPU_FLAG_P_XPSS;
	}

	rv = acpicpu_pstate_pct(sc);

	if (ACPI_FAILURE(rv)) {
		str = "_PCT";
		goto fail;
	}

	/*
	 * The ACPI 3.0 and 4.0 specifications mandate three
	 * objects for P-states: _PSS, _PCT, and _PPC. A less
	 * strict wording is however used in the earlier 2.0
	 * standard, and some systems conforming to ACPI 2.0
	 * do not have _PPC, the method for the dynamic maximum.
	 */
	rv = AcpiGetHandle(sc->sc_node->ad_handle, "_PPC", &tmp);

	if (ACPI_FAILURE(rv))
		aprint_debug_dev(self, "_PPC missing\n");

	/*
	 * Employ the XPSS structure by filling
	 * it with MD information required for FFH.
	 */
	rv = acpicpu_md_pstate_pss(sc);

	if (rv != 0) {
		rv = AE_SUPPORT;
		goto fail;
	}

	sc->sc_flags |= ACPICPU_FLAG_P;

	acpicpu_pstate_bios();
	acpicpu_pstate_reset(sc);
	acpicpu_pstate_attach_evcnt(sc);
	acpicpu_pstate_attach_print(sc);

	return;

fail:
	switch (rv) {

	case AE_NOT_FOUND:
		return;

	case AE_SUPPORT:
		aprint_verbose_dev(sc->sc_dev, "P-states not supported\n");
		return;

	default:
		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "%s: %s\n", str, AcpiFormatException(rv));
	}
}

static void
acpicpu_pstate_attach_print(struct acpicpu_softc *sc)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps;
	static bool once = false;
	const char *str;
	uint32_t i;

	if (once != false)
		return;

	str = (method != ACPI_ADR_SPACE_SYSTEM_IO) ? "FFH" : "I/O";

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		aprint_debug_dev(sc->sc_dev, "P%d: %3s, "
		    "lat %3u us, pow %5u mW, %4u MHz\n", i, str,
		    ps->ps_latency, ps->ps_power, ps->ps_freq);
	}

	once = true;
}

static void
acpicpu_pstate_attach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	uint32_t i;

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}
}

int
acpicpu_pstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	size_t size;
	int rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
		return 0;

	rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop);

	if (rv != 0)
		return rv;

	/*
	 * Detach the event counters before the
	 * array that contains them is released.
	 */
	acpicpu_pstate_detach_evcnt(sc);

	size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);

	if (sc->sc_pstate != NULL)
		kmem_free(sc->sc_pstate, size);

	sc->sc_flags &= ~ACPICPU_FLAG_P;

	return 0;
}

static void
acpicpu_pstate_detach_evcnt(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	uint32_t i;

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}
}

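/*
 * Start the P-state machinery: initialize the machine-dependent
 * backend and switch to the highest-performance state (P0).
 */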
void
acpicpu_pstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps;
	uint32_t i;
	int rv;

	rv = acpicpu_md_pstate_start();

	if (rv != 0)
		goto fail;

	/*
	 * Initialize the state to P0.
	 */
	for (i = 0, rv = ENXIO; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0) {
			sc->sc_cold = false;
			rv = acpicpu_pstate_set(sc, ps->ps_freq);
			break;
		}
	}

	if (rv != 0)
		goto fail;

	return;

fail:
	sc->sc_flags &= ~ACPICPU_FLAG_P;

	if (rv == EEXIST) {
		aprint_error_dev(self, "driver conflicts with existing one\n");
		return;
	}

	aprint_error_dev(self, "failed to start P-states (err %d)\n", rv);
}

bool
acpicpu_pstate_suspend(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_pstate *ps = NULL;
	int32_t i;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_reset(sc);
	mutex_exit(&sc->sc_mtx);

	if (acpicpu_pstate_saved != 0)
		return true;

	/*
	 * Following the design notes for Windows, we set the
	 * highest-numbered (i.e. lowest-frequency) P-state when
	 * entering any of the system sleep states. When resuming,
	 * the saved P-state will be restored.
	 *
	 * Microsoft Corporation: Windows Native Processor
	 * Performance Control. Version 1.1a, November, 2002.
	 */
	for (i = sc->sc_pstate_count - 1; i >= 0; i--) {

		if (sc->sc_pstate[i].ps_freq != 0) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	if (__predict_false(ps == NULL))
		return true;

	mutex_enter(&sc->sc_mtx);
	acpicpu_pstate_saved = sc->sc_pstate_current;
	mutex_exit(&sc->sc_mtx);

	if (acpicpu_pstate_saved == ps->ps_freq)
		return true;

	(void)acpicpu_pstate_set(sc, ps->ps_freq);

	return true;
}

bool
acpicpu_pstate_resume(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	if (acpicpu_pstate_saved != 0) {
		(void)acpicpu_pstate_set(sc, acpicpu_pstate_saved);
		acpicpu_pstate_saved = 0;
	}

	return true;
}

void
acpicpu_pstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;
	uint32_t old, new;

	sc = device_private(self);

	mutex_enter(&sc->sc_mtx);

	old = sc->sc_pstate_max;
	acpicpu_pstate_change(sc);
	new = sc->sc_pstate_max;

	if (old == new) {
		mutex_exit(&sc->sc_mtx);
		return;
	}

	mutex_exit(&sc->sc_mtx);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "maximum frequency "
	    "changed from P%u (%u MHz) to P%u (%u MHz)\n",
	    old, sc->sc_pstate[old].ps_freq, new,
	    sc->sc_pstate[sc->sc_pstate_max].ps_freq));

	(void)acpicpu_pstate_set(sc, sc->sc_pstate[new].ps_freq);
}

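/*
 * Parse the _PSS package that enumerates the supported P-states.
 * Entries that fail to parse, or whose frequencies are not
 * strictly decreasing, are disabled by zeroing their frequency.
 */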
ACPI_STATUS
acpicpu_pstate_pss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t count;
	uint32_t i, j;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	sc->sc_pstate_count = obj->Package.Count;

	if (sc->sc_pstate_count == 0) {
		rv = AE_NOT_EXIST;
		goto out;
	}

	if (sc->sc_pstate_count > ACPICPU_P_STATE_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
	    sizeof(struct acpicpu_pstate), KM_SLEEP);

	if (sc->sc_pstate == NULL) {
		rv = AE_NO_MEMORY;
		goto out;
	}

	for (count = i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];
		rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);

		if (ACPI_FAILURE(rv)) {
			ps->ps_freq = 0;
			continue;
		}

		for (j = 0; j < i; j++) {

			if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
				ps->ps_freq = 0;
				break;
			}
		}

		if (ps->ps_freq != 0)
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static ACPI_STATUS
acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 6)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 6; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	ps->ps_freq = elm[0].Integer.Value;
	ps->ps_power = elm[1].Integer.Value;
	ps->ps_latency = elm[2].Integer.Value;
	ps->ps_latency_bm = elm[3].Integer.Value;
	ps->ps_control = elm[4].Integer.Value;
	ps->ps_status = elm[5].Integer.Value;

	if (ps->ps_freq == 0 || ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	/*
	 * The latency is typically around 10 usec
	 * on Intel CPUs. Use that as the minimum.
	 */
	if (ps->ps_latency < 10)
		ps->ps_latency = 10;

	return AE_OK;
}

static ACPI_STATUS
acpicpu_pstate_xpss(struct acpicpu_softc *sc)
{
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i = 0;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "XPSS", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != sc->sc_pstate_count) {
		rv = AE_LIMIT;
		goto out;
	}

	while (i < sc->sc_pstate_count) {

		ps = &sc->sc_pstate[i];
		acpicpu_pstate_xpss_add(ps, &obj->Package.Elements[i]);

		i++;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static ACPI_STATUS
acpicpu_pstate_xpss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
{
	ACPI_OBJECT *elm;
	int i;

	if (obj->Type != ACPI_TYPE_PACKAGE)
		return AE_TYPE;

	if (obj->Package.Count != 8)
		return AE_BAD_DATA;

	elm = obj->Package.Elements;

	for (i = 0; i < 4; i++) {

		if (elm[i].Type != ACPI_TYPE_INTEGER)
			return AE_TYPE;

		if (elm[i].Integer.Value > UINT32_MAX)
			return AE_AML_NUMERIC_OVERFLOW;
	}

	for (; i < 8; i++) {

		if (elm[i].Type != ACPI_TYPE_BUFFER)
			return AE_TYPE;

		if (elm[i].Buffer.Length != 8)
			return AE_LIMIT;
	}

	/*
	 * Only overwrite the elements that were
	 * not available from the conventional _PSS.
	 */
	if (ps->ps_freq == 0)
		ps->ps_freq = elm[0].Integer.Value;

	if (ps->ps_power == 0)
		ps->ps_power = elm[1].Integer.Value;

	if (ps->ps_latency == 0)
		ps->ps_latency = elm[2].Integer.Value;

	if (ps->ps_latency_bm == 0)
		ps->ps_latency_bm = elm[3].Integer.Value;

	if (ps->ps_control == 0)
		ps->ps_control = ACPI_GET64(elm[4].Buffer.Pointer);

	if (ps->ps_status == 0)
		ps->ps_status = ACPI_GET64(elm[5].Buffer.Pointer);

	if (ps->ps_control_mask == 0)
		ps->ps_control_mask = ACPI_GET64(elm[6].Buffer.Pointer);

	if (ps->ps_status_mask == 0)
		ps->ps_status_mask = ACPI_GET64(elm[7].Buffer.Pointer);

	/*
	 * The latency is often defined to be
	 * zero on AMD systems. Raise that to 1.
	 */
	if (ps->ps_latency == 0)
		ps->ps_latency = 1;

	ps->ps_flags |= ACPICPU_FLAG_P_XPSS;

	if (ps->ps_freq > 9999)
		return AE_BAD_DECIMAL_CONSTANT;

	return AE_OK;
}

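/*
 * Parse the _PCT package, which contains two register
 * descriptors: the performance control register and the
 * performance status register used for the transitions,
 * either in I/O space or as fixed hardware (FFH).
 */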
ACPI_STATUS
acpicpu_pstate_pct(struct acpicpu_softc *sc)
{
	static const size_t size = sizeof(struct acpicpu_reg);
	struct acpicpu_reg *reg[2];
	struct acpicpu_pstate *ps;
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint8_t width;
	uint32_t i;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count != 2) {
		rv = AE_LIMIT;
		goto out;
	}

	for (i = 0; i < 2; i++) {

		elm = &obj->Package.Elements[i];

		if (elm->Type != ACPI_TYPE_BUFFER) {
			rv = AE_TYPE;
			goto out;
		}

		if (size > elm->Buffer.Length) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;

		switch (reg[i]->reg_spaceid) {

		case ACPI_ADR_SPACE_SYSTEM_IO:

			if (reg[i]->reg_addr == 0) {
				rv = AE_AML_ILLEGAL_ADDRESS;
				goto out;
			}

			width = reg[i]->reg_bitwidth;

			if (width + reg[i]->reg_bitoffset > 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			if (width != 8 && width != 16 && width != 32) {
				rv = AE_AML_BAD_RESOURCE_VALUE;
				goto out;
			}

			break;

		case ACPI_ADR_SPACE_FIXED_HARDWARE:

			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) != 0) {

				if (reg[i]->reg_bitwidth != 64) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				if (reg[i]->reg_bitoffset != 0) {
					rv = AE_AML_BAD_RESOURCE_VALUE;
					goto out;
				}

				break;
			}

			if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}

			break;

		default:
			rv = AE_AML_INVALID_SPACE_ID;
			goto out;
		}
	}

	if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	(void)memcpy(&sc->sc_pstate_control, reg[0], size);
	(void)memcpy(&sc->sc_pstate_status, reg[1], size);

	if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
		goto out;

	/*
	 * In XPSS the control address can not be zero,
	 * but the status address may be. In this case,
	 * comparable to T-states, we can ignore the status
	 * check during the P-state (FFH) transition.
	 */
	if (sc->sc_pstate_control.reg_addr == 0) {
		rv = AE_AML_BAD_RESOURCE_LENGTH;
		goto out;
	}

	/*
	 * If XPSS is present, copy the MSR addresses
	 * to the P-state structures for convenience.
	 */
	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		ps->ps_status_addr = sc->sc_pstate_status.reg_addr;
		ps->ps_control_addr = sc->sc_pstate_control.reg_addr;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static int
acpicpu_pstate_max(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * Evaluate the currently highest P-state that can be used.
	 * If available, we can use either this state or any lower
	 * power (i.e. higher numbered) state from the _PSS object.
	 * Note that the return value must match the _OST parameter.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq != 0) {
			sc->sc_pstate_max = val;
			return 0;
		}
	}

	return 1;
}

static int
acpicpu_pstate_min(struct acpicpu_softc *sc)
{
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	/*
	 * The _PDL object defines the minimum when passive cooling
	 * is being performed. If available, we can use the returned
	 * state or any higher power (i.e. lower numbered) state.
	 */
	rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PDL", &val);

	if (ACPI_SUCCESS(rv) && val < sc->sc_pstate_count) {

		if (sc->sc_pstate[val].ps_freq == 0)
			return 1;

		if (val >= sc->sc_pstate_max) {
			sc->sc_pstate_min = val;
			return 0;
		}
	}

	return 1;
}

static void
acpicpu_pstate_change(struct acpicpu_softc *sc)
{
	static ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[2];
	static int val = 0;

	acpicpu_pstate_reset(sc);

	/*
	 * Cache the checks as the optional
	 * _PDL and _OST are rarely present.
	 */
	if (val == 0)
		val = acpicpu_pstate_min(sc);

	arg.Count = 2;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_INTEGER;
	obj[1].Type = ACPI_TYPE_INTEGER;

	obj[0].Integer.Value = ACPICPU_P_NOTIFY;
	obj[1].Integer.Value = acpicpu_pstate_max(sc);

	if (ACPI_FAILURE(rv))
		return;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
}

static void
acpicpu_pstate_reset(struct acpicpu_softc *sc)
{

	sc->sc_pstate_max = 0;
	sc->sc_pstate_min = sc->sc_pstate_count - 1;
}

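/*
 * Claim P-state control from the firmware by writing the
 * PSTATE_CNT value from the FADT to the SMI command port.
 */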
static void
acpicpu_pstate_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.PstateControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0 || val == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

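/*
 * Get the current frequency (in MHz), using the cached value
 * when available. Otherwise the value is obtained either via
 * the machine-dependent backend (FFH) or from the status port.
 */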
int
acpicpu_pstate_get(struct acpicpu_softc *sc, uint32_t *freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val = 0;
	uint64_t addr;
	uint8_t width;
	int rv;

	if (__predict_false(sc->sc_cold != false)) {
		rv = EBUSY;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	/*
	 * Use the cached value, if available.
	 */
	if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) {
		*freq = sc->sc_pstate_current;
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	mutex_exit(&sc->sc_mtx);

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_get(sc, freq);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		(void)AcpiOsReadPort(addr, &val, width);

		if (val == 0) {
			rv = EIO;
			goto fail;
		}

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			if (val == sc->sc_pstate[i].ps_status) {
				ps = &sc->sc_pstate[i];
				break;
			}
		}

		if (ps == NULL) {
			rv = EIO;
			goto fail;
		}

		*freq = ps->ps_freq;
		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = *freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed "
	    "to get frequency (err %d)\n", rv);

	mutex_enter(&sc->sc_mtx);
	*freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}

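/*
 * Set the frequency (in MHz) to the requested value. The value
 * must correspond to a P-state within the currently allowed
 * maximum and minimum; the transition is done either via the
 * machine-dependent backend (FFH) or via the control port.
 */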
int
acpicpu_pstate_set(struct acpicpu_softc *sc, uint32_t freq)
{
	const uint8_t method = sc->sc_pstate_control.reg_spaceid;
	struct acpicpu_pstate *ps = NULL;
	uint32_t i, val;
	uint64_t addr;
	uint8_t width;
	int rv;

	if (__predict_false(sc->sc_cold != false)) {
		rv = EBUSY;
		goto fail;
	}

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0)) {
		rv = ENODEV;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);

	if (sc->sc_pstate_current == freq) {
		mutex_exit(&sc->sc_mtx);
		return 0;
	}

	/*
	 * Verify that the requested frequency is available.
	 *
	 * The access needs to be protected since the currently
	 * available maximum and minimum may change dynamically.
	 */
	for (i = sc->sc_pstate_max; i <= sc->sc_pstate_min; i++) {

		if (__predict_false(sc->sc_pstate[i].ps_freq == 0))
			continue;

		if (sc->sc_pstate[i].ps_freq == freq) {
			ps = &sc->sc_pstate[i];
			break;
		}
	}

	mutex_exit(&sc->sc_mtx);

	if (__predict_false(ps == NULL)) {
		rv = EINVAL;
		goto fail;
	}

	switch (method) {

	case ACPI_ADR_SPACE_FIXED_HARDWARE:

		rv = acpicpu_md_pstate_set(ps);

		if (__predict_false(rv != 0))
			goto fail;

		break;

	case ACPI_ADR_SPACE_SYSTEM_IO:

		addr = sc->sc_pstate_control.reg_addr;
		width = sc->sc_pstate_control.reg_bitwidth;

		(void)AcpiOsWritePort(addr, ps->ps_control, width);

		addr = sc->sc_pstate_status.reg_addr;
		width = sc->sc_pstate_status.reg_bitwidth;

		/*
		 * Some systems take longer to respond
		 * than the reported worst-case latency.
		 */
		for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {

			(void)AcpiOsReadPort(addr, &val, width);

			if (val == ps->ps_status)
				break;

			DELAY(ps->ps_latency);
		}

		if (i == ACPICPU_P_STATE_RETRY) {
			rv = EAGAIN;
			goto fail;
		}

		break;

	default:
		rv = ENOTTY;
		goto fail;
	}

	mutex_enter(&sc->sc_mtx);
	ps->ps_evcnt.ev_count++;
	sc->sc_pstate_current = freq;
	mutex_exit(&sc->sc_mtx);

	return 0;

fail:
	aprint_error_dev(sc->sc_dev, "failed to set "
	    "frequency to %u (err %d)\n", freq, rv);

	mutex_enter(&sc->sc_mtx);
	sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
	mutex_exit(&sc->sc_mtx);

	return rv;
}