1 /* $NetBSD: acpi_cpu.c,v 1.35 2011/03/02 06:17:08 jruoho Exp $ */ 2 3 /*- 4 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 #include <sys/cdefs.h> 30 __KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.35 2011/03/02 06:17:08 jruoho Exp $"); 31 32 #include <sys/param.h> 33 #include <sys/cpu.h> 34 #include <sys/evcnt.h> 35 #include <sys/kernel.h> 36 #include <sys/kmem.h> 37 #include <sys/module.h> 38 #include <sys/mutex.h> 39 #include <sys/sysctl.h> 40 41 #include <dev/acpi/acpireg.h> 42 #include <dev/acpi/acpivar.h> 43 #include <dev/acpi/acpi_cpu.h> 44 45 #include <machine/acpi_machdep.h> 46 #include <machine/cpuvar.h> 47 48 #define _COMPONENT ACPI_BUS_COMPONENT 49 ACPI_MODULE_NAME ("acpi_cpu") 50 51 static int acpicpu_match(device_t, cfdata_t, void *); 52 static void acpicpu_attach(device_t, device_t, void *); 53 static int acpicpu_detach(device_t, int); 54 static int acpicpu_once_attach(void); 55 static int acpicpu_once_detach(void); 56 static void acpicpu_start(device_t); 57 static void acpicpu_sysctl(device_t); 58 59 static ACPI_STATUS acpicpu_object(ACPI_HANDLE, struct acpicpu_object *); 60 static int acpicpu_find(struct cpu_info *, 61 struct acpi_devnode **); 62 static uint32_t acpicpu_cap(struct acpicpu_softc *); 63 static ACPI_STATUS acpicpu_cap_pdc(struct acpicpu_softc *, uint32_t); 64 static ACPI_STATUS acpicpu_cap_osc(struct acpicpu_softc *, 65 uint32_t, uint32_t *); 66 static void acpicpu_notify(ACPI_HANDLE, uint32_t, void *); 67 static bool acpicpu_suspend(device_t, const pmf_qual_t *); 68 static bool acpicpu_resume(device_t, const pmf_qual_t *); 69 static void acpicpu_evcnt_attach(device_t); 70 static void acpicpu_evcnt_detach(device_t); 71 static void acpicpu_debug_print(device_t); 72 static const char *acpicpu_debug_print_method(uint8_t); 73 static const char *acpicpu_debug_print_dep(uint32_t); 74 75 static uint32_t acpicpu_count = 0; 76 struct acpicpu_softc **acpicpu_sc = NULL; 77 static struct sysctllog *acpicpu_log = NULL; 78 static bool acpicpu_dynamic = true; 79 static bool acpicpu_passive = true; 80 81 static const char * const acpicpu_hid[] = { 82 "ACPI0007", 83 NULL 84 }; 85 86 CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc), 87 acpicpu_match, acpicpu_attach, acpicpu_detach, NULL); 88 89 static int 90 acpicpu_match(device_t parent, cfdata_t match, void *aux) 91 { 92 struct cpu_info *ci; 93 94 if (acpi_softc == NULL) 95 return 0; 96 97 ci = acpicpu_md_match(parent, match, aux); 98 99 if (ci == NULL) 100 return 0; 101 102 return acpicpu_find(ci, NULL); 103 } 104 105 static void 106 acpicpu_attach(device_t parent, device_t self, void *aux) 107 { 108 struct acpicpu_softc *sc = device_private(self); 109 struct cpu_info *ci; 110 cpuid_t id; 111 int rv; 112 113 ci = acpicpu_md_attach(parent, self, aux); 114 115 if (ci == NULL) 116 return; 117 118 sc->sc_ci = ci; 119 sc->sc_dev = self; 120 sc->sc_cold = true; 121 sc->sc_node = NULL; 122 123 rv = acpicpu_find(ci, &sc->sc_node); 124 125 if (rv == 0) { 126 aprint_normal(": failed to match processor\n"); 127 return; 128 } 129 130 if (acpicpu_once_attach() != 0) { 131 aprint_normal(": failed to initialize\n"); 132 return; 133 } 134 135 KASSERT(acpi_softc != NULL); 136 KASSERT(acpicpu_sc != NULL); 137 KASSERT(sc->sc_node != NULL); 138 139 id = sc->sc_ci->ci_acpiid; 140 141 if (acpicpu_sc[id] != NULL) { 142 aprint_normal(": already attached\n"); 143 return; 144 } 145 146 aprint_naive("\n"); 147 aprint_normal(": ACPI CPU\n"); 148 149 rv = acpicpu_object(sc->sc_node->ad_handle, &sc->sc_object); 150 151 if (ACPI_FAILURE(rv)) 152 aprint_verbose_dev(self, "failed to obtain CPU object\n"); 153 154 acpicpu_count++; 155 acpicpu_sc[id] = sc; 156 157 sc->sc_cap = acpicpu_cap(sc); 158 sc->sc_ncpus = acpi_md_ncpus(); 159 sc->sc_flags = acpicpu_md_flags(); 160 161 KASSERT(acpicpu_count <= sc->sc_ncpus); 162 KASSERT(sc->sc_node->ad_device == NULL); 163 164 sc->sc_node->ad_device = self; 165 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE); 166 167 acpicpu_cstate_attach(self); 168 acpicpu_pstate_attach(self); 169 acpicpu_tstate_attach(self); 170 171 acpicpu_debug_print(self); 172 acpicpu_evcnt_attach(self); 173 174 (void)config_interrupts(self, acpicpu_start); 175 (void)acpi_register_notify(sc->sc_node, acpicpu_notify); 176 (void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume); 177 } 178 179 static int 180 acpicpu_detach(device_t self, int flags) 181 { 182 struct acpicpu_softc *sc = device_private(self); 183 int rv = 0; 184 185 sc->sc_cold = true; 186 187 acpicpu_evcnt_detach(self); 188 acpi_deregister_notify(sc->sc_node); 189 190 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0) 191 rv = acpicpu_cstate_detach(self); 192 193 if (rv != 0) 194 return rv; 195 196 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) 197 rv = acpicpu_pstate_detach(self); 198 199 if (rv != 0) 200 return rv; 201 202 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0) 203 rv = acpicpu_tstate_detach(self); 204 205 if (rv != 0) 206 return rv; 207 208 mutex_destroy(&sc->sc_mtx); 209 210 sc->sc_node->ad_device = NULL; 211 212 acpicpu_count--; 213 acpicpu_once_detach(); 214 215 return 0; 216 } 217 218 static int 219 acpicpu_once_attach(void) 220 { 221 struct acpicpu_softc *sc; 222 unsigned int i; 223 224 if (acpicpu_count != 0) 225 return 0; 226 227 KASSERT(acpicpu_sc == NULL); 228 KASSERT(acpicpu_log == NULL); 229 230 acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP); 231 232 if (acpicpu_sc == NULL) 233 return ENOMEM; 234 235 for (i = 0; i < maxcpus; i++) 236 acpicpu_sc[i] = NULL; 237 238 return 0; 239 } 240 241 static int 242 acpicpu_once_detach(void) 243 { 244 struct acpicpu_softc *sc; 245 246 if (acpicpu_count != 0) 247 return EDEADLK; 248 249 if (acpicpu_log != NULL) 250 sysctl_teardown(&acpicpu_log); 251 252 if (acpicpu_sc != NULL) 253 kmem_free(acpicpu_sc, maxcpus * sizeof(*sc)); 254 255 return 0; 256 } 257 258 static void 259 acpicpu_start(device_t self) 260 { 261 struct acpicpu_softc *sc = device_private(self); 262 static uint32_t count = 0; 263 264 /* 265 * Run the state-specific initialization routines. These 266 * must run only once, after interrupts have been enabled, 267 * all CPUs are running, and all ACPI CPUs have attached. 268 */ 269 if (++count != acpicpu_count || acpicpu_count != sc->sc_ncpus) { 270 sc->sc_cold = false; 271 return; 272 } 273 274 /* 275 * Set the last ACPI CPU as non-cold 276 * only after C-states are enabled. 277 */ 278 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0) 279 acpicpu_cstate_start(self); 280 281 sc->sc_cold = false; 282 283 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) 284 acpicpu_pstate_start(self); 285 286 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0) 287 acpicpu_tstate_start(self); 288 289 acpicpu_sysctl(self); 290 aprint_debug_dev(self, "ACPI CPUs started\n"); 291 } 292 293 static void 294 acpicpu_sysctl(device_t self) 295 { 296 const struct sysctlnode *node; 297 int err; 298 299 KASSERT(acpicpu_log == NULL); 300 301 err = sysctl_createv(&acpicpu_log, 0, NULL, &node, 302 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 303 NULL, 0, NULL, 0, CTL_HW, CTL_EOL); 304 305 if (err != 0) 306 goto fail; 307 308 err = sysctl_createv(&acpicpu_log, 0, &node, &node, 309 CTLFLAG_PERMANENT, CTLTYPE_NODE, "acpi", NULL, 310 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 311 312 if (err != 0) 313 goto fail; 314 315 err = sysctl_createv(&acpicpu_log, 0, &node, &node, 316 0, CTLTYPE_NODE, "cpu", SYSCTL_DESCR("ACPI CPU"), 317 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 318 319 if (err != 0) 320 goto fail; 321 322 err = sysctl_createv(&acpicpu_log, 0, &node, NULL, 323 CTLFLAG_READWRITE, CTLTYPE_BOOL, "dynamic", 324 SYSCTL_DESCR("Dynamic states"), NULL, 0, 325 &acpicpu_dynamic, 0, CTL_CREATE, CTL_EOL); 326 327 if (err != 0) 328 goto fail; 329 330 err = sysctl_createv(&acpicpu_log, 0, &node, NULL, 331 CTLFLAG_READWRITE, CTLTYPE_BOOL, "passive", 332 SYSCTL_DESCR("Passive cooling"), NULL, 0, 333 &acpicpu_passive, 0, CTL_CREATE, CTL_EOL); 334 335 if (err != 0) 336 goto fail; 337 338 return; 339 340 fail: 341 aprint_error_dev(self, "failed to initialize sysctl (err %d)\n", err); 342 } 343 344 static ACPI_STATUS 345 acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao) 346 { 347 ACPI_OBJECT *obj; 348 ACPI_BUFFER buf; 349 ACPI_STATUS rv; 350 351 rv = acpi_eval_struct(hdl, NULL, &buf); 352 353 if (ACPI_FAILURE(rv)) 354 goto out; 355 356 obj = buf.Pointer; 357 358 if (obj->Type != ACPI_TYPE_PROCESSOR) { 359 rv = AE_TYPE; 360 goto out; 361 } 362 363 if (obj->Processor.ProcId > (uint32_t)maxcpus) { 364 rv = AE_LIMIT; 365 goto out; 366 } 367 368 KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX); 369 370 if (ao != NULL) { 371 ao->ao_procid = obj->Processor.ProcId; 372 ao->ao_pblklen = obj->Processor.PblkLength; 373 ao->ao_pblkaddr = obj->Processor.PblkAddress; 374 } 375 376 out: 377 if (buf.Pointer != NULL) 378 ACPI_FREE(buf.Pointer); 379 380 return rv; 381 } 382 383 static int 384 acpicpu_find(struct cpu_info *ci, struct acpi_devnode **ptr) 385 { 386 struct acpi_softc *sc = acpi_softc; 387 struct acpicpu_object ao; 388 struct acpi_devnode *ad; 389 ACPI_INTEGER val; 390 ACPI_STATUS rv; 391 392 if (sc == NULL || acpi_active == 0) 393 return 0; 394 395 /* 396 * CPUs are declared in the ACPI namespace 397 * either as a Processor() or as a Device(). 398 * In both cases the MADT entries are used 399 * for the match (see ACPI 4.0, section 8.4). 400 */ 401 SIMPLEQ_FOREACH(ad, &sc->ad_head, ad_list) { 402 403 if (ad->ad_type == ACPI_TYPE_PROCESSOR) { 404 405 rv = acpicpu_object(ad->ad_handle, &ao); 406 407 if (ACPI_SUCCESS(rv) && ci->ci_acpiid == ao.ao_procid) 408 goto out; 409 } 410 411 if (acpi_match_hid(ad->ad_devinfo, acpicpu_hid) != 0) { 412 413 rv = acpi_eval_integer(ad->ad_handle, "_UID", &val); 414 415 if (ACPI_SUCCESS(rv) && ci->ci_acpiid == val) 416 goto out; 417 } 418 } 419 420 return 0; 421 422 out: 423 if (ptr != NULL) 424 *ptr = ad; 425 426 return 10; 427 } 428 429 static uint32_t 430 acpicpu_cap(struct acpicpu_softc *sc) 431 { 432 uint32_t flags, cap = 0; 433 const char *str; 434 ACPI_STATUS rv; 435 436 /* 437 * Query and set machine-dependent capabilities. 438 * Note that the Intel-specific _PDC method was 439 * deprecated in the ACPI 3.0 in favor of _OSC. 440 */ 441 flags = acpicpu_md_cap(); 442 rv = acpicpu_cap_osc(sc, flags, &cap); 443 444 if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) { 445 str = "_OSC"; 446 goto fail; 447 } 448 449 rv = acpicpu_cap_pdc(sc, flags); 450 451 if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND) { 452 str = "_PDC"; 453 goto fail; 454 } 455 456 if (cap == 0) 457 cap = flags; 458 459 return cap; 460 461 fail: 462 aprint_error_dev(sc->sc_dev, "failed to evaluate " 463 "%s: %s\n", str, AcpiFormatException(rv)); 464 465 return 0; 466 } 467 468 static ACPI_STATUS 469 acpicpu_cap_pdc(struct acpicpu_softc *sc, uint32_t flags) 470 { 471 ACPI_OBJECT_LIST arg; 472 ACPI_OBJECT obj; 473 uint32_t cap[3]; 474 475 arg.Count = 1; 476 arg.Pointer = &obj; 477 478 cap[0] = ACPICPU_PDC_REVID; 479 cap[1] = 1; 480 cap[2] = flags; 481 482 obj.Type = ACPI_TYPE_BUFFER; 483 obj.Buffer.Length = sizeof(cap); 484 obj.Buffer.Pointer = (void *)cap; 485 486 return AcpiEvaluateObject(sc->sc_node->ad_handle, "_PDC", &arg, NULL); 487 } 488 489 static ACPI_STATUS 490 acpicpu_cap_osc(struct acpicpu_softc *sc, uint32_t flags, uint32_t *val) 491 { 492 ACPI_OBJECT_LIST arg; 493 ACPI_OBJECT obj[4]; 494 ACPI_OBJECT *osc; 495 ACPI_BUFFER buf; 496 ACPI_STATUS rv; 497 uint32_t cap[2]; 498 uint32_t *ptr; 499 int i = 5; 500 501 static uint8_t intel_uuid[16] = { 502 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47, 503 0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53 504 }; 505 506 cap[0] = ACPI_OSC_QUERY; 507 cap[1] = flags; 508 509 again: 510 arg.Count = 4; 511 arg.Pointer = obj; 512 513 obj[0].Type = ACPI_TYPE_BUFFER; 514 obj[0].Buffer.Length = sizeof(intel_uuid); 515 obj[0].Buffer.Pointer = intel_uuid; 516 517 obj[1].Type = ACPI_TYPE_INTEGER; 518 obj[1].Integer.Value = ACPICPU_PDC_REVID; 519 520 obj[2].Type = ACPI_TYPE_INTEGER; 521 obj[2].Integer.Value = __arraycount(cap); 522 523 obj[3].Type = ACPI_TYPE_BUFFER; 524 obj[3].Buffer.Length = sizeof(cap); 525 obj[3].Buffer.Pointer = (void *)cap; 526 527 buf.Pointer = NULL; 528 buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER; 529 530 rv = AcpiEvaluateObject(sc->sc_node->ad_handle, "_OSC", &arg, &buf); 531 532 if (ACPI_FAILURE(rv)) 533 goto out; 534 535 osc = buf.Pointer; 536 537 if (osc->Type != ACPI_TYPE_BUFFER) { 538 rv = AE_TYPE; 539 goto out; 540 } 541 542 if (osc->Buffer.Length != sizeof(cap)) { 543 rv = AE_BUFFER_OVERFLOW; 544 goto out; 545 } 546 547 ptr = (uint32_t *)osc->Buffer.Pointer; 548 549 if ((ptr[0] & ACPI_OSC_ERROR) != 0) { 550 rv = AE_ERROR; 551 goto out; 552 } 553 554 if ((ptr[0] & (ACPI_OSC_ERROR_REV | ACPI_OSC_ERROR_UUID)) != 0) { 555 rv = AE_BAD_PARAMETER; 556 goto out; 557 } 558 559 /* 560 * "It is strongly recommended that the OS evaluate 561 * _OSC with the Query Support Flag set until _OSC 562 * returns the Capabilities Masked bit clear, to 563 * negotiate the set of features to be granted to 564 * the OS for native support (ACPI 4.0, 6.2.10)." 565 */ 566 if ((ptr[0] & ACPI_OSC_ERROR_MASKED) != 0 && i >= 0) { 567 568 ACPI_FREE(buf.Pointer); 569 i--; 570 571 goto again; 572 } 573 574 if ((cap[0] & ACPI_OSC_QUERY) != 0) { 575 576 ACPI_FREE(buf.Pointer); 577 cap[0] &= ~ACPI_OSC_QUERY; 578 579 goto again; 580 } 581 582 /* 583 * It is permitted for _OSC to return all 584 * bits cleared, but this is specified to 585 * vary on per-device basis. Assume that 586 * everything rather than nothing will be 587 * supported in this case; we do not need 588 * the firmware to know the CPU features. 589 */ 590 *val = (ptr[1] != 0) ? ptr[1] : cap[1]; 591 592 out: 593 if (buf.Pointer != NULL) 594 ACPI_FREE(buf.Pointer); 595 596 return rv; 597 } 598 599 static void 600 acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux) 601 { 602 ACPI_OSD_EXEC_CALLBACK func; 603 struct acpicpu_softc *sc; 604 device_t self = aux; 605 606 sc = device_private(self); 607 608 if (sc->sc_cold != false) 609 return; 610 611 if (acpicpu_dynamic != true) 612 return; 613 614 switch (evt) { 615 616 case ACPICPU_C_NOTIFY: 617 618 if ((sc->sc_flags & ACPICPU_FLAG_C) == 0) 619 return; 620 621 func = acpicpu_cstate_callback; 622 break; 623 624 case ACPICPU_P_NOTIFY: 625 626 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) 627 return; 628 629 func = acpicpu_pstate_callback; 630 break; 631 632 case ACPICPU_T_NOTIFY: 633 634 if ((sc->sc_flags & ACPICPU_FLAG_T) == 0) 635 return; 636 637 func = acpicpu_tstate_callback; 638 break; 639 640 default: 641 aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt); 642 return; 643 } 644 645 (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev); 646 } 647 648 static bool 649 acpicpu_suspend(device_t self, const pmf_qual_t *qual) 650 { 651 struct acpicpu_softc *sc = device_private(self); 652 653 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0) 654 (void)acpicpu_cstate_suspend(self); 655 656 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) 657 (void)acpicpu_pstate_suspend(self); 658 659 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0) 660 (void)acpicpu_tstate_suspend(self); 661 662 sc->sc_cold = true; 663 664 return true; 665 } 666 667 static bool 668 acpicpu_resume(device_t self, const pmf_qual_t *qual) 669 { 670 struct acpicpu_softc *sc = device_private(self); 671 672 sc->sc_cold = false; 673 674 if ((sc->sc_flags & ACPICPU_FLAG_C) != 0) 675 (void)acpicpu_cstate_resume(self); 676 677 if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) 678 (void)acpicpu_pstate_resume(self); 679 680 if ((sc->sc_flags & ACPICPU_FLAG_T) != 0) 681 (void)acpicpu_tstate_resume(self); 682 683 return true; 684 } 685 686 static void 687 acpicpu_evcnt_attach(device_t self) 688 { 689 struct acpicpu_softc *sc = device_private(self); 690 struct acpicpu_cstate *cs; 691 struct acpicpu_pstate *ps; 692 struct acpicpu_tstate *ts; 693 const char *str; 694 uint32_t i; 695 696 for (i = 0; i < __arraycount(sc->sc_cstate); i++) { 697 698 cs = &sc->sc_cstate[i]; 699 700 if (cs->cs_method == 0) 701 continue; 702 703 str = "HALT"; 704 705 if (cs->cs_method == ACPICPU_C_STATE_FFH) 706 str = "MWAIT"; 707 708 if (cs->cs_method == ACPICPU_C_STATE_SYSIO) 709 str = "I/O"; 710 711 (void)snprintf(cs->cs_name, sizeof(cs->cs_name), 712 "C%d (%s)", i, str); 713 714 evcnt_attach_dynamic(&cs->cs_evcnt, EVCNT_TYPE_MISC, 715 NULL, device_xname(sc->sc_dev), cs->cs_name); 716 } 717 718 for (i = 0; i < sc->sc_pstate_count; i++) { 719 720 ps = &sc->sc_pstate[i]; 721 722 if (ps->ps_freq == 0) 723 continue; 724 725 (void)snprintf(ps->ps_name, sizeof(ps->ps_name), 726 "P%u (%u MHz)", i, ps->ps_freq); 727 728 evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_MISC, 729 NULL, device_xname(sc->sc_dev), ps->ps_name); 730 } 731 732 for (i = 0; i < sc->sc_tstate_count; i++) { 733 734 ts = &sc->sc_tstate[i]; 735 736 if (ts->ts_percent == 0) 737 continue; 738 739 (void)snprintf(ts->ts_name, sizeof(ts->ts_name), 740 "T%u (%u %%)", i, ts->ts_percent); 741 742 evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC, 743 NULL, device_xname(sc->sc_dev), ts->ts_name); 744 } 745 } 746 747 static void 748 acpicpu_evcnt_detach(device_t self) 749 { 750 struct acpicpu_softc *sc = device_private(self); 751 struct acpicpu_cstate *cs; 752 struct acpicpu_pstate *ps; 753 struct acpicpu_tstate *ts; 754 uint32_t i; 755 756 for (i = 0; i < __arraycount(sc->sc_cstate); i++) { 757 758 cs = &sc->sc_cstate[i]; 759 760 if (cs->cs_method != 0) 761 evcnt_detach(&cs->cs_evcnt); 762 } 763 764 for (i = 0; i < sc->sc_pstate_count; i++) { 765 766 ps = &sc->sc_pstate[i]; 767 768 if (ps->ps_freq != 0) 769 evcnt_detach(&ps->ps_evcnt); 770 } 771 772 for (i = 0; i < sc->sc_tstate_count; i++) { 773 774 ts = &sc->sc_tstate[i]; 775 776 if (ts->ts_percent != 0) 777 evcnt_detach(&ts->ts_evcnt); 778 } 779 } 780 781 static void 782 acpicpu_debug_print(device_t self) 783 { 784 struct acpicpu_softc *sc = device_private(self); 785 struct cpu_info *ci = sc->sc_ci; 786 struct acpicpu_cstate *cs; 787 struct acpicpu_pstate *ps; 788 struct acpicpu_tstate *ts; 789 static bool once = false; 790 struct acpicpu_dep *dep; 791 uint32_t i, method; 792 793 if (once != true) { 794 795 for (i = 0; i < __arraycount(sc->sc_cstate); i++) { 796 797 cs = &sc->sc_cstate[i]; 798 799 if (cs->cs_method == 0) 800 continue; 801 802 aprint_verbose_dev(sc->sc_dev, "C%d: %3s, " 803 "lat %3u us, pow %5u mW%s\n", i, 804 acpicpu_debug_print_method(cs->cs_method), 805 cs->cs_latency, cs->cs_power, 806 (cs->cs_flags != 0) ? ", bus master check" : ""); 807 } 808 809 method = sc->sc_pstate_control.reg_spaceid; 810 811 for (i = 0; i < sc->sc_pstate_count; i++) { 812 813 ps = &sc->sc_pstate[i]; 814 815 if (ps->ps_freq == 0) 816 continue; 817 818 aprint_verbose_dev(sc->sc_dev, "P%d: %3s, " 819 "lat %3u us, pow %5u mW, %4u MHz%s\n", i, 820 acpicpu_debug_print_method(method), 821 ps->ps_latency, ps->ps_power, ps->ps_freq, 822 (ps->ps_flags & ACPICPU_FLAG_P_TURBO) != 0 ? 823 ", turbo boost" : ""); 824 } 825 826 method = sc->sc_tstate_control.reg_spaceid; 827 828 for (i = 0; i < sc->sc_tstate_count; i++) { 829 830 ts = &sc->sc_tstate[i]; 831 832 if (ts->ts_percent == 0) 833 continue; 834 835 aprint_verbose_dev(sc->sc_dev, "T%u: %3s, " 836 "lat %3u us, pow %5u mW, %3u %%\n", i, 837 acpicpu_debug_print_method(method), 838 ts->ts_latency, ts->ts_power, ts->ts_percent); 839 } 840 841 once = true; 842 } 843 844 aprint_debug_dev(sc->sc_dev, "id %u, lapic id %u, " 845 "cap 0x%04x, flags 0x%08x\n", ci->ci_acpiid, 846 (uint32_t)ci->ci_cpuid, sc->sc_cap, sc->sc_flags); 847 848 if ((sc->sc_flags & ACPICPU_FLAG_C_DEP) != 0) { 849 850 dep = &sc->sc_cstate_dep; 851 852 aprint_debug_dev(sc->sc_dev, "C-state coordination: " 853 "%u CPUs, domain %u, type %s\n", dep->dep_ncpus, 854 dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type)); 855 } 856 857 if ((sc->sc_flags & ACPICPU_FLAG_P_DEP) != 0) { 858 859 dep = &sc->sc_pstate_dep; 860 861 aprint_debug_dev(sc->sc_dev, "P-state coordination: " 862 "%u CPUs, domain %u, type %s\n", dep->dep_ncpus, 863 dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type)); 864 } 865 866 if ((sc->sc_flags & ACPICPU_FLAG_T_DEP) != 0) { 867 868 dep = &sc->sc_tstate_dep; 869 870 aprint_debug_dev(sc->sc_dev, "T-state coordination: " 871 "%u CPUs, domain %u, type %s\n", dep->dep_ncpus, 872 dep->dep_domain, acpicpu_debug_print_dep(dep->dep_type)); 873 } 874 } 875 876 static const char * 877 acpicpu_debug_print_method(uint8_t val) 878 { 879 880 switch (val) { 881 882 case ACPICPU_C_STATE_HALT: 883 return "HLT"; 884 885 case ACPICPU_C_STATE_FFH: 886 case ACPI_ADR_SPACE_FIXED_HARDWARE: 887 return "FFH"; 888 889 case ACPICPU_C_STATE_SYSIO: /* ACPI_ADR_SPACE_SYSTEM_IO */ 890 return "I/O"; 891 892 default: 893 return "???"; 894 } 895 } 896 897 static const char * 898 acpicpu_debug_print_dep(uint32_t val) 899 { 900 901 switch (val) { 902 903 case ACPICPU_DEP_SW_ALL: 904 return "SW_ALL"; 905 906 case ACPICPU_DEP_SW_ANY: 907 return "SW_ANY"; 908 909 case ACPICPU_DEP_HW_ALL: 910 return "HW_ALL"; 911 912 default: 913 return "unknown"; 914 } 915 } 916 917 MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL); 918 919 #ifdef _MODULE 920 #include "ioconf.c" 921 #endif 922 923 static int 924 acpicpu_modcmd(modcmd_t cmd, void *aux) 925 { 926 int rv = 0; 927 928 switch (cmd) { 929 930 case MODULE_CMD_INIT: 931 932 #ifdef _MODULE 933 rv = config_init_component(cfdriver_ioconf_acpicpu, 934 cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu); 935 #endif 936 break; 937 938 case MODULE_CMD_FINI: 939 940 #ifdef _MODULE 941 rv = config_fini_component(cfdriver_ioconf_acpicpu, 942 cfattach_ioconf_acpicpu, cfdata_ioconf_acpicpu); 943 #endif 944 break; 945 946 default: 947 rv = ENOTTY; 948 } 949 950 return rv; 951 } 952