/* $NetBSD: acpi_cpu.c,v 1.50 2014/02/25 18:30:09 pooka Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.50 2014/02/25 18:30:09 pooka Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/cpufreq.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>
#include <machine/cpuvar.h>

#define _COMPONENT		ACPI_BUS_COMPONENT
ACPI_MODULE_NAME		("acpi_cpu")

static int		acpicpu_match(device_t, cfdata_t, void *);
static void		acpicpu_attach(device_t, device_t, void *);
static int		acpicpu_detach(device_t, int);
static int		acpicpu_once_attach(void);
static int		acpicpu_once_detach(void);
static void		acpicpu_start(device_t);
static void		acpicpu_sysctl(device_t);

static ACPI_STATUS	acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static uint32_t		acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cap_osc(struct acpicpu_softc *,
			    uint32_t, uint32_t *);
static void		acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		acpicpu_resume(device_t, const pmf_qual_t *);
static void		acpicpu_evcnt_attach(device_t);
static void		acpicpu_evcnt_detach(device_t);
static void		acpicpu_debug_print(device_t);
static const char	*acpicpu_debug_print_method_c(uint8_t);
static const char	*acpicpu_debug_print_method_pt(uint8_t);
static const char	*acpicpu_debug_print_dep(uint32_t);

static uint32_t		 acpicpu_count = 0;
struct acpicpu_softc	**acpicpu_sc = NULL;
static struct sysctllog	*acpicpu_log = NULL;
static bool		 acpicpu_dynamic = true;
static bool		 acpicpu_passive = true;

static const struct {
	const char	*manu;
	const char	*prod;
	const char	*vers;
} acpicpu_quirks[] = {
	{ "Supermicro", "PDSMi-LN4", "0123456789" },
	{ "ASUSTeK Computer INC.", "M2A-MX", "Rev 1.xx" },
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	const char *manu, *prod, *vers;
	struct cpu_info *ci;
	size_t i;

	if (acpi_softc == NULL)
		return 0;

	manu = pmf_get_platform("board-vendor");
	prod = pmf_get_platform("board-product");
	vers = pmf_get_platform("board-version");

	if (manu != NULL && prod != NULL && vers != NULL) {

		for (i = 0; i < __arraycount(acpicpu_quirks); i++) {

			if (strcasecmp(acpicpu_quirks[i].manu, manu) == 0 &&
			    strcasecmp(acpicpu_quirks[i].prod, prod) == 0 &&
			    strcasecmp(acpicpu_quirks[i].vers, vers) == 0)
				return 0;
		}
	}

	ci = acpicpu_md_match(parent, match, aux);

	if (ci == NULL)
		return 0;

	if (acpi_match_cpu_info(ci) == NULL)
		return 0;

	return 10;
}

static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci;
	ACPI_HANDLE hdl;
	cpuid_t id;
	int rv;

	ci = acpicpu_md_attach(parent, self, aux);

	if (ci == NULL)
		return;

	sc->sc_ci = ci;
	sc->sc_dev = self;
	sc->sc_cold = true;

	hdl = acpi_match_cpu_info(ci);

	if (hdl == NULL) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	sc->sc_node = acpi_match_node(hdl);

	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);
	acpicpu_evcnt_attach(self);

	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}

static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);

	sc->sc_cold = true;

	acpicpu_evcnt_detach(self);
	acpi_deregister_notify(sc->sc_node);

	acpicpu_cstate_detach(self);
	acpicpu_pstate_detach(self);
	acpicpu_tstate_detach(self);

	mutex_destroy(&sc->sc_mtx);
	sc->sc_node->ad_device = NULL;

	acpicpu_count--;
	acpicpu_once_detach();

	return 0;
}

static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	if (acpicpu_count != 0)
		return 0;

	KASSERT(acpicpu_sc == NULL);
	KASSERT(acpicpu_log == NULL);

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	if (acpicpu_sc == NULL)
		return ENOMEM;

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_count != 0)
		return EDEADLK;

	cpufreq_deregister();

	if (acpicpu_log != NULL)
		sysctl_teardown(&acpicpu_log);

	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	return 0;
}

static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static uint32_t count = 0;
	struct cpufreq cf;
	uint32_t i;

	/*
	 * Run the state-specific initialization routines. These
	 * must run only once, after interrupts have been enabled,
	 * all CPUs are running, and all ACPI CPUs have attached.
	 */
	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
		sc->sc_cold = false;
		return;
	}

	/*
	 * Set the last ACPI CPU as non-cold
	 * only after C-states are enabled.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);
	aprint_debug_dev(self, "ACPI CPUs started\n");

	/*
	 * Register with cpufreq(9).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) {

		(void)memset(&cf, 0, sizeof(struct cpufreq));

		cf.cf_mp = false;
		cf.cf_cookie = NULL;
		cf.cf_get_freq = acpicpu_pstate_get;
		cf.cf_set_freq = acpicpu_pstate_set;
		cf.cf_state_count = sc->sc_pstate_count;

		(void)strlcpy(cf.cf_name, "acpicpu", sizeof(cf.cf_name));

		for (i = 0; i < sc->sc_pstate_count; i++) {

			if (sc->sc_pstate[i].ps_freq == 0)
				continue;

			cf.cf_state[i].cfs_freq = sc->sc_pstate[i].ps_freq;
			cf.cf_state[i].cfs_power = sc->sc_pstate[i].ps_power;
		}

		if (cpufreq_register(&cf) != 0)
			aprint_error_dev(self, "failed to register cpufreq\n");
	}
}

static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	KASSERT(acpicpu_log == NULL);

	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}

static ACPI_STATUS
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t flags, cap = 0;
	ACPI_STATUS rv;

	/*
	 * Query and set machine-dependent capabilities.
	 * Note that the Intel-specific _PDC method has
	 * already been evaluated. It was furthermore
	 * deprecated in the ACPI 3.0 in favor of _OSC.
	 */
	flags = acpi_md_pdc();
	rv = acpicpu_cap_osc(sc, flags, &cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {

		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "_OSC: %s\n", AcpiFormatException(rv));
	}

	return (cap != 0) ? cap : flags;
}

static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;

	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 *  _OSC with the Query Support Flag set until _OSC
	 *  returns the Capabilities Masked bit clear, to
	 *  negotiate the set of features to be granted to
	 *  the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	if (acpicpu_dynamic != true)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);
	static const int handler = OSL_NOTIFY_HANDLER;

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)AcpiOsExecute(handler, acpicpu_cstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)AcpiOsExecute(handler, acpicpu_pstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)AcpiOsExecute(handler, acpicpu_tstate_resume, self);

	return true;
}

static void
acpicpu_evcnt_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	const char *str;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		str = "HALT";

		if (cs->cs_method == ACPICPU_C_STATE_FFH)
			str = "MWAIT";

		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
			str = "I/O";

		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
		    "C%d (%s)", i, str);

		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), cs->cs_name);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent == 0)
			continue;

		(void)snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "T%u (%u %%)", i, ts->ts_percent);

		evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ts->ts_name);
	}
}

static void
acpicpu_evcnt_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method != 0)
			evcnt_detach(&cs->cs_evcnt);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent != 0)
			evcnt_detach(&ts->ts_evcnt);
	}
}

static void
acpicpu_debug_print(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_ci;
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	static bool once = false;
	struct acpicpu_dep *dep;
	uint32_t i, method;

	if (once != true) {

		for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

			cs = &sc->sc_cstate[i];

			if (cs->cs_method == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
			    "lat %3u us, pow %5u mW%s\n", i,
			    acpicpu_debug_print_method_c(cs->cs_method),
			    cs->cs_latency, cs->cs_power,
			    (cs->cs_flags != 0) ? ", bus master check" : "");
		}

		method = sc->sc_pstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_pstate_count; i++) {

			ps = &sc->sc_pstate[i];

			if (ps->ps_freq == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
			    "lat %3u us, pow %5u mW, %4u MHz%s\n", i,
			    acpicpu_debug_print_method_pt(method),
			    ps->ps_latency, ps->ps_power, ps->ps_freq,
			    (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ?
			    ", turbo boost" : "");
		}

		method = sc->sc_tstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_tstate_count; i++) {

			ts = &sc->sc_tstate[i];

			if (ts->ts_percent == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "T%u: %3s, "
			    "lat %3u us, pow %5u mW, %3u %%\n", i,
			    acpicpu_debug_print_method_pt(method),
			    ts->ts_latency, ts->ts_power, ts->ts_percent);
		}

		once = true;
	}

	aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, "
	    "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid,
	    (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags);

	if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) {

		dep = &sc->sc_cstate_dep;

		aprint_debug_dev(sc->sc_dev, "C-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) {

		dep = &sc->sc_pstate_dep;

		aprint_debug_dev(sc->sc_dev, "P-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}

	if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) {

		dep = &sc->sc_tstate_dep;

		aprint_debug_dev(sc->sc_dev, "T-state coordination: "
		    "%u CPUs, domain %u, type %s\n", dep->dep_ncpus,
		    dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type));
	}
}

static const char *
acpicpu_debug_print_method_c(uint8_t val)
{

	if (val == ACPICPU_C_STATE_FFH)
		return "FFH";

	if (val == ACPICPU_C_STATE_HALT)
		return "HLT";

	if (val == ACPICPU_C_STATE_SYSIO)
		return "I/O";

	return "???";
}

static const char *
acpicpu_debug_print_method_pt(uint8_t val)
{

	if (val == ACPI_ADR_SPACE_SYSTEM_IO)
		return "I/O";

	if (val == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return "FFH";

	return "???";
}

static const char *
acpicpu_debug_print_dep(uint32_t val)
{

	switch (val) {

	case ACPICPU_DEP_SW_ALL:
		return "SW_ALL";

	case ACPICPU_DEP_SW_ANY:
		return "SW_ANY";

	case ACPICPU_DEP_HW_ALL:
		return "HW_ALL";

	default:
		return "unknown";
	}
}

MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
acpicpu_modcmd(modcmd_t cmd, void *aux)
{
	int rv = 0;

	switch (cmd) {

	case MODULE_CMD_INIT:

#ifdef _MODULE
		rv = config_init_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	case MODULE_CMD_FINI:

#ifdef _MODULE
		rv = config_fini_component(cfdriver_ioconf_acpicpu,
		    cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu);
#endif
		break;

	default:
		rv = ENOTTY;
	}

	return rv;
}