/* $NetBSD: acpi_cpu.c,v 1.41 2011/06/12 10:11:52 jruoho Exp $ */

/*-
 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.41 2011/06/12 10:11:52 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>
#include <machine/cpuvar.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu")

static int		acpicpu_match(device_t, cfdata_t, void *);
static void		acpicpu_attach(device_t, device_t, void *);
static int		acpicpu_detach(device_t, int);
static int		acpicpu_once_attach(void);
static int		acpicpu_once_detach(void);
static void		acpicpu_start(device_t);
static void		acpicpu_sysctl(device_t);

static ACPI_STATUS	acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static int		acpicpu_find(struct cpu_info *,
			    struct acpi_devnode **);
static uint32_t		acpicpu_cap(struct acpicpu_softc *);
static ACPI_STATUS	acpicpu_cap_osc(struct acpicpu_softc *,
			    uint32_t, uint32_t *);
static void		acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		acpicpu_resume(device_t, const pmf_qual_t *);
static void		acpicpu_evcnt_attach(device_t);
static void		acpicpu_evcnt_detach(device_t);
static void		acpicpu_debug_print(device_t);
static const char	*acpicpu_debug_print_method(uint8_t);
static const char	*acpicpu_debug_print_dep(uint32_t);

static uint32_t		 acpicpu_count = 0;
struct acpicpu_softc	**acpicpu_sc = NULL;
static struct sysctllog	*acpicpu_log = NULL;
static bool		 acpicpu_dynamic = true;
static bool		 acpicpu_passive = true;

static const struct {
	const char	*manu;
	const char	*prod;
	const char	*vers;
} acpicpu_quirks[] = {
	{ "Supermicro", "PDSMi-LN4", "0123456789" },
};

static const char * const acpicpu_hid[] = {
	"ACPI0007",
	NULL
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	const char *manu, *prod, *vers;
	struct cpu_info *ci;
	size_t i;

	if (acpi_softc == NULL)
		return 0;

	manu = pmf_get_platform("system-manufacturer");
	prod = pmf_get_platform("system-product-name");
	vers = pmf_get_platform("system-version");

	if (manu != NULL && prod != NULL && vers != NULL) {

		for (i = 0; i < __arraycount(acpicpu_quirks); i++) {

			if (strcasecmp(acpicpu_quirks[i].manu, manu) == 0 &&
			    strcasecmp(acpicpu_quirks[i].prod, prod) == 0 &&
			    strcasecmp(acpicpu_quirks[i].vers, vers) == 0)
				return 0;
		}
	}

	ci = acpicpu_md_match(parent, match, aux);

	if (ci == NULL)
		return 0;

	return acpicpu_find(ci, NULL);
}

static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci;
	cpuid_t id;
	int rv;

	ci = acpicpu_md_attach(parent, self, aux);

	if (ci == NULL)
		return;

	sc->sc_ci = ci;
	sc->sc_dev = self;
	sc->sc_cold = true;
	sc->sc_node = NULL;

	rv = acpicpu_find(ci, &sc->sc_node);

	if (rv == 0) {
		aprint_normal(": failed to match processor\n");
		return;
	}

	if (acpicpu_once_attach() != 0) {
		aprint_normal(": failed to initialize\n");
		return;
	}

	KASSERT(acpi_softc != NULL);
	KASSERT(acpicpu_sc != NULL);
	KASSERT(sc->sc_node != NULL);

	id = sc->sc_ci->ci_acpiid;

	if (acpicpu_sc[id] != NULL) {
		aprint_normal(": already attached\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object);

	if (ACPI_FAILURE(rv))
		aprint_verbose_dev(self, "failed to obtain CPU object\n");

	acpicpu_count++;
	acpicpu_sc[id] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_ncpus = acpi_md_ncpus();
	sc->sc_flags = acpicpu_md_flags();

	KASSERT(acpicpu_count <= sc->sc_ncpus);
	KASSERT(sc->sc_node->ad_device == NULL);

	sc->sc_node->ad_device = self;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	acpicpu_debug_print(self);
	acpicpu_evcnt_attach(self);

	(void)config_interrupts(self, acpicpu_start);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}

static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);
	int rv = 0;

	sc->sc_cold = true;

	acpicpu_evcnt_detach(self);
	acpi_deregister_notify(sc->sc_node);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		rv = acpicpu_cstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		rv = acpicpu_pstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		rv = acpicpu_tstate_detach(self);

	if (rv != 0)
		return rv;

	mutex_destroy(&sc->sc_mtx);

	sc->sc_node->ad_device = NULL;

	acpicpu_count--;
	acpicpu_once_detach();

	return 0;
}

static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	if (acpicpu_count != 0)
		return 0;

	KASSERT(acpicpu_sc == NULL);
	KASSERT(acpicpu_log == NULL);

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	if (acpicpu_sc == NULL)
		return ENOMEM;

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_count != 0)
		return EDEADLK;

	if (acpicpu_log != NULL)
		sysctl_teardown(&acpicpu_log);

	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	return 0;
}

static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static uint32_t count = 0;

	/*
	 * Run the state-specific initialization routines. These
	 * must run only once, after interrupts have been enabled,
	 * all CPUs are running, and all ACPI CPUs have attached.
	 */
	if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) {
		sc->sc_cold = false;
		return;
	}

	/*
	 * Set the last ACPI CPU as non-cold
	 * only after C-states are enabled.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	acpicpu_sysctl(self);
	aprint_debug_dev(self, "ACPI CPUs started\n");
}

static void
acpicpu_sysctl(device_t self)
{
	const struct sysctlnode *node;
	int err;

	KASSERT(acpicpu_log == NULL);

	err = sysctl_createv(&acpicpu_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"),
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic",
	    SYSCTL_DESCR("Dynamic states"), NULL, 0,
	    &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	err = sysctl_createv(&acpicpu_log, 0, &node, NULL,
	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive",
	    SYSCTL_DESCR("Passive cooling"), NULL, 0,
	    &acpicpu_passive, 0, CTL_CREATE, CTL_EOL);

	if (err != 0)
		goto fail;

	return;

fail:
	aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err);
}

static ACPI_STATUS
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static int
acpicpu_find(struct cpu_info *ci, struct acpi_devnode **ptr)
{
	struct acpi_softc *sc = acpi_softc;
	struct acpicpu_object ao;
	struct acpi_devnode *ad;
	ACPI_INTEGER val;
	ACPI_STATUS rv;

	if (sc == NULL || acpi_active == 0)
		return 0;

	/*
	 * CPUs are declared in the ACPI namespace
	 * either as a Processor() or as a Device().
	 * In both cases the MADT entries are used
	 * for the match (see ACPI 4.0, section 8.4).
	 */
	SIMPLEQ_FOREACH(ad, &sc->ad_head, ad_list) {

		if (ad->ad_type == ACPI_TYPE_PROCESSOR) {

			rv = acpicpu_object(ad->ad_handle, &ao);

			if (ACPI_SUCCESS(rv) && ci->ci_acpiid == ao.ao_procid)
				goto out;
		}

		if (acpi_match_hid(ad->ad_devinfo, acpicpu_hid) != 0) {

			rv = acpi_eval_integer(ad->ad_handle, "_UID", &val);

			if (ACPI_SUCCESS(rv) && ci->ci_acpiid == val)
				goto out;
		}
	}

	return 0;

out:
	if (ptr != NULL)
		*ptr = ad;

	return 10;
}

static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t flags, cap = 0;
	ACPI_STATUS rv;

	/*
	 * Query and set machine-dependent capabilities.
	 * Note that the Intel-specific _PDC method has
	 * already been evaluated. It was furthermore
	 * deprecated in ACPI 3.0 in favor of _OSC.
	 */
	flags = acpi_md_pdc();
	rv = acpicpu_cap_osc(sc, flags, &cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) {

		aprint_error_dev(sc->sc_dev, "failed to evaluate "
		    "_OSC: %s\n", AcpiFormatException(rv));
	}

	return (cap != 0) ? cap : flags;
}

static ACPI_STATUS
acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val)
{
	ACPI_OBJECT_LIST arg;
	ACPI_OBJECT obj[4];
	ACPI_OBJECT *osc;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t cap[2];
	uint32_t *ptr;
	int i = 5;

	static uint8_t intel_uuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap[0] = ACPI_OSC_QUERY;
	cap[1] = flags;

again:
	arg.Count = 4;
	arg.Pointer = obj;

	obj[0].Type = ACPI_TYPE_BUFFER;
	obj[0].Buffer.Length = sizeof(intel_uuid);
	obj[0].Buffer.Pointer = intel_uuid;

	obj[1].Type = ACPI_TYPE_INTEGER;
	obj[1].Integer.Value = ACPICPU_PDC_REVID;

	obj[2].Type = ACPI_TYPE_INTEGER;
	obj[2].Integer.Value = __arraycount(cap);

	obj[3].Type = ACPI_TYPE_BUFFER;
	obj[3].Buffer.Length = sizeof(cap);
	obj[3].Buffer.Pointer = (void *)cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf);

	if (ACPI_FAILURE(rv))
		goto out;

	osc = buf.Pointer;

	if (osc->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (osc->Buffer.Length != sizeof(cap)) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	ptr = (uint32_t *)osc->Buffer.Pointer;

	if ((ptr[0] & ACPI_OSC_ERROR) != 0) {
		rv = AE_ERROR;
		goto out;
	}

	if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) {
		rv = AE_BAD_PARAMETER;
		goto out;
	}

	/*
	 * "It is strongly recommended that the OS evaluate
	 * _OSC with the Query Support Flag set until _OSC
	 * returns the Capabilities Masked bit clear, to
	 * negotiate the set of features to be granted to
	 * the OS for native support (ACPI 4.0, 6.2.10)."
	 */
	if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) {

		ACPI_FREE(buf.Pointer);
		i--;

		goto again;
	}

	if ((cap[0] & ACPI_OSC_QUERY) != 0) {

		ACPI_FREE(buf.Pointer);
		cap[0] &= ~ACPI_OSC_QUERY;

		goto again;
	}

	/*
	 * It is permitted for _OSC to return all
	 * bits cleared, but this is specified to
	 * vary on a per-device basis. Assume that
	 * everything rather than nothing will be
	 * supported in this case; we do not need
	 * the firmware to know the CPU features.
	 */
	*val = (ptr[1] != 0) ? ptr[1] : cap[1];

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	if (acpicpu_dynamic != true)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);
	static const int handler = OSL_NOTIFY_HANDLER;

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)AcpiOsExecute(handler, acpicpu_cstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)AcpiOsExecute(handler, acpicpu_pstate_resume, self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)AcpiOsExecute(handler, acpicpu_tstate_resume, self);

	return true;
}

static void
acpicpu_evcnt_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	const char *str;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		str = "HALT";

		if (cs->cs_method == ACPICPU_C_STATE_FFH)
			str = "MWAIT";

		if (cs->cs_method == ACPICPU_C_STATE_SYSIO)
			str = "I/O";

		(void)snprintf(cs->cs_name, sizeof(cs->cs_name),
		    "C%d (%s)", i, str);

		evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), cs->cs_name);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq == 0)
			continue;

		(void)snprintf(ps->ps_name, sizeof(ps->ps_name),
		    "P%u (%u MHz)", i, ps->ps_freq);

		evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ps->ps_name);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent == 0)
			continue;

		(void)snprintf(ts->ts_name, sizeof(ts->ts_name),
		    "T%u (%u %%)", i, ts->ts_percent);

		evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), ts->ts_name);
	}
}

static void
acpicpu_evcnt_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	uint32_t i;

	for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method != 0)
			evcnt_detach(&cs->cs_evcnt);
	}

	for (i = 0; i < sc->sc_pstate_count; i++) {

		ps = &sc->sc_pstate[i];

		if (ps->ps_freq != 0)
			evcnt_detach(&ps->ps_evcnt);
	}

	for (i = 0; i < sc->sc_tstate_count; i++) {

		ts = &sc->sc_tstate[i];

		if (ts->ts_percent != 0)
			evcnt_detach(&ts->ts_evcnt);
	}
}

static void
acpicpu_debug_print(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	struct cpu_info *ci = sc->sc_ci;
	struct acpicpu_cstate *cs;
	struct acpicpu_pstate *ps;
	struct acpicpu_tstate *ts;
	static bool once = false;
	struct acpicpu_dep *dep;
	uint32_t i, method;

	if (once != true) {

		for (i = 0; i < __arraycount(sc->sc_cstate); i++) {

			cs = &sc->sc_cstate[i];

			if (cs->cs_method == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "C%d: %3s, "
			    "lat %3u us, pow %5u mW%s\n", i,
			    acpicpu_debug_print_method(cs->cs_method),
			    cs->cs_latency, cs->cs_power,
			    (cs->cs_flags != 0) ? ", bus master check" : "");
		}

		method = sc->sc_pstate_control.reg_spaceid;

		for (i = 0; i < sc->sc_pstate_count; i++) {

			ps = &sc->sc_pstate[i];

			if (ps->ps_freq == 0)
				continue;

			aprint_verbose_dev(sc->sc_dev, "P%d: %3s, "
			    "lat %3u us, pow %5u mW, %4u MHz%s\n", i,
			    acpicpu_debug_print_method(method),
			    ps->ps_latency, ps->ps_power, ps->ps_freq,
			    (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ?
			    ", turbo boost" : "");
812 ", turbo boost" : ""); 813 } 814 815 method = sc->sc_tstate_control.reg_spaceid; 816 817 for (i = 0; i < sc->sc_tstate_count; i++) { 818 819 ts = &sc->sc_tstate[i]; 820 821 if (ts->ts_percent == 0) 822 continue; 823 824 aprint_verbose_dev(sc->sc_dev, "T%u: %3s, " 825 "lat %3u us, pow %5u mW, %3u %%\n", i, 826 acpicpu_debug_print_method(method), 827 ts->ts_latency, ts->ts_power, ts->ts_percent); 828 } 829 830 once = true; 831 } 832 833 aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, " 834 "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid, 835 (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags); 836 837 if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) { 838 839 dep = &sc->sc_cstate_dep; 840 841 aprint_debug_dev(sc->sc_dev, "C-state coordination: " 842 "%u CPUs, domain %u, type %s\n", dep->dep_ncpus, 843 dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type)); 844 } 845 846 if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) { 847 848 dep = &sc->sc_pstate_dep; 849 850 aprint_debug_dev(sc->sc_dev, "P-state coordination: " 851 "%u CPUs, domain %u, type %s\n", dep->dep_ncpus, 852 dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type)); 853 } 854 855 if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) { 856 857 dep = &sc->sc_tstate_dep; 858 859 aprint_debug_dev(sc->sc_dev, "T-state coordination: " 860 "%u CPUs, domain %u, type %s\n", dep->dep_ncpus, 861 dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type)); 862 } 863 } 864 865 static const char * 866 acpicpu_debug_print_method(uint8_t val) 867 { 868 869 if (val == ACPICPU_C_STATE_FFH) 870 return "FFH"; 871 872 if (val == ACPICPU_C_STATE_HALT) 873 return "HLT"; 874 875 if (val == ACPICPU_C_STATE_SYSIO) 876 return "I/O"; 877 878 if (val == ACPI_ADR_SPACE_SYSTEM_IO) 879 return "I/O"; 880 881 if (val == ACPI_ADR_SPACE_FIXED_HARDWARE) 882 return "FFH"; 883 884 return "???"; 885 } 886 887 static const char * 888 acpicpu_debug_print_dep(uint32_t val) 889 { 890 891 switch (val) { 892 893 case ACPICPU_DEP_SW_ALL: 894 return "SW_ALL"; 895 896 case ACPICPU_DEP_SW_ANY: 897 return "SW_ANY"; 898 899 case ACPICPU_DEP_HW_ALL: 900 return "HW_ALL"; 901 902 default: 903 return "unknown"; 904 } 905 } 906 907 MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL); 908 909 #ifdef _MODULE 910 #include "ioconf.c" 911 #endif 912 913 static int 914 acpicpu_modcmd(modcmd_t cmd, void *aux) 915 { 916 int rv = 0; 917 918 switch (cmd) { 919 920 case MODULE_CMD_INIT: 921 922 #ifdef _MODULE 923 rv = config_init_component(cfdriver_ioconf_acpicpu, 924 cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu); 925 #endif 926 break; 927 928 case MODULE_CMD_FINI: 929 930 #ifdef _MODULE 931 rv = config_fini_component(cfdriver_ioconf_acpicpu, 932 cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu); 933 #endif 934 break; 935 936 default: 937 rv = ENOTTY; 938 } 939 940 return rv; 941 } 942