/* $NetBSD: acpi_cpu.c,v 1.20 2010/08/19 05:09:53 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.20 2010/08/19 05:09:53 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/once.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT	ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	("acpi_cpu")

static int		acpicpu_match(device_t, cfdata_t, void *);
static void		acpicpu_attach(device_t, device_t, void *);
static int		acpicpu_detach(device_t, int);
static int		acpicpu_once_attach(void);
static int		acpicpu_once_detach(void);
static void		acpicpu_prestart(device_t);
static void		acpicpu_start(device_t);

static int		acpicpu_object(ACPI_HANDLE, struct acpicpu_object *);
static cpuid_t		acpicpu_id(uint32_t);
static uint32_t		acpicpu_cap(struct acpicpu_softc *);
static ACPI_OBJECT     *acpicpu_cap_init(void);
static ACPI_STATUS	acpicpu_cap_pdc(ACPI_HANDLE);
static ACPI_STATUS	acpicpu_cap_osc(ACPI_HANDLE, uint32_t *);
static const char      *acpicpu_cap_oscerr(uint32_t);
static void		acpicpu_notify(ACPI_HANDLE, uint32_t, void *);
static bool		acpicpu_suspend(device_t, const pmf_qual_t *);
static bool		acpicpu_resume(device_t, const pmf_qual_t *);

struct acpicpu_softc  **acpicpu_sc = NULL;

static const char * const acpicpu_hid[] = {
	"ACPI0007",
	NULL
};

CFATTACH_DECL_NEW(acpicpu, sizeof(struct acpicpu_softc),
    acpicpu_match, acpicpu_attach, acpicpu_detach, NULL);

static int
acpicpu_match(device_t parent, cfdata_t match, void *aux)
{
	struct acpi_attach_args *aa = aux;
	struct acpicpu_object ao;
	int rv;

	if (aa->aa_node->ad_type != ACPI_TYPE_PROCESSOR)
		return 0;

	if (acpi_match_hid(aa->aa_node->ad_devinfo, acpicpu_hid) != 0)
		return 1;

	rv = acpicpu_object(aa->aa_node->ad_handle, &ao);

	if (rv != 0 || acpicpu_id(ao.ao_procid) == 0xFFFFFF)
		return 0;

	return 1;
}

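/*
 * Autoconfiguration glue: evaluate the Processor() object, allocate the
 * global array of per-CPU softc pointers on the first attachment, map the
 * optional P_BLK register block, and attach the C-, P-, and T-state
 * support. The final start-up is deferred until all CPUs have attached.
 */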
static void
acpicpu_attach(device_t parent, device_t self, void *aux)
{
	struct acpicpu_softc *sc = device_private(self);
	struct acpi_attach_args *aa = aux;
	static ONCE_DECL(once_attach);
	int rv;

	rv = acpicpu_object(aa->aa_node->ad_handle, &sc->sc_object);

	if (rv != 0)
		return;

	rv = RUN_ONCE(&once_attach, acpicpu_once_attach);

	if (rv != 0)
		return;

	sc->sc_dev = self;
	sc->sc_cold = true;
	sc->sc_mapped = false;
	sc->sc_passive = false;
	sc->sc_iot = aa->aa_iot;
	sc->sc_node = aa->aa_node;
	sc->sc_cpuid = acpicpu_id(sc->sc_object.ao_procid);

	if (sc->sc_cpuid == 0xFFFFFF) {
		aprint_error(": invalid CPU ID\n");
		return;
	}

	if (acpicpu_sc[sc->sc_cpuid] != NULL) {
		aprint_error(": already attached\n");
		return;
	}

	acpicpu_sc[sc->sc_cpuid] = sc;

	sc->sc_cap = acpicpu_cap(sc);
	sc->sc_flags |= acpicpu_md_quirks();

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);

	aprint_naive("\n");
	aprint_normal(": ACPI CPU\n");

	/*
	 * We should claim the bus space. However, we do this only
	 * to announce that the space is in use. As is noted in
	 * ichlpcib(4), we can continue our I/O without bus_space(9).
	 */
	if (sc->sc_object.ao_pblklen == 6 && sc->sc_object.ao_pblkaddr != 0) {

		rv = bus_space_map(sc->sc_iot, sc->sc_object.ao_pblkaddr,
		    sc->sc_object.ao_pblklen, 0, &sc->sc_ioh);

		if (rv == 0)
			sc->sc_mapped = true;
	}

	acpicpu_cstate_attach(self);
	acpicpu_pstate_attach(self);
	acpicpu_tstate_attach(self);

	(void)config_defer(self, acpicpu_prestart);
	(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
	(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
}

static int
acpicpu_detach(device_t self, int flags)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	int rv = 0;

	sc->sc_cold = true;
	acpi_deregister_notify(sc->sc_node);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		rv = acpicpu_cstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		rv = acpicpu_pstate_detach(self);

	if (rv != 0)
		return rv;

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		rv = acpicpu_tstate_detach(self);

	if (rv != 0)
		return rv;

	rv = RUN_ONCE(&once_detach, acpicpu_once_detach);

	if (rv != 0)
		return rv;

	if (sc->sc_mapped != false)
		bus_space_unmap(sc->sc_iot, sc->sc_ioh,
		    sc->sc_object.ao_pblklen);

	mutex_destroy(&sc->sc_mtx);

	return 0;
}

static int
acpicpu_once_attach(void)
{
	struct acpicpu_softc *sc;
	unsigned int i;

	acpicpu_sc = kmem_zalloc(maxcpus * sizeof(*sc), KM_SLEEP);

	if (acpicpu_sc == NULL)
		return ENOMEM;

	for (i = 0; i < maxcpus; i++)
		acpicpu_sc[i] = NULL;

	return 0;
}

static int
acpicpu_once_detach(void)
{
	struct acpicpu_softc *sc;

	if (acpicpu_sc != NULL)
		kmem_free(acpicpu_sc, maxcpus * sizeof(*sc));

	return 0;
}

static void
acpicpu_prestart(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static bool once = false;

	if (once != false) {
		sc->sc_cold = false;
		return;
	}

	once = true;

	(void)config_interrupts(self, acpicpu_start);
}

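/*
 * The start routine is scheduled with config_interrupts(9) by the first
 * CPU that reaches acpicpu_prestart(), and thus runs exactly once.
 */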
static void
acpicpu_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);

	/*
	 * Run the state-specific initialization
	 * routines. These should be called only
	 * once, after interrupts are enabled and
	 * all ACPI CPUs have attached.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		acpicpu_cstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		acpicpu_pstate_start(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		acpicpu_tstate_start(self);

	aprint_debug_dev(sc->sc_dev, "ACPI CPUs started (cap "
	    "0x%02x, flags 0x%06x)\n", sc->sc_cap, sc->sc_flags);

	sc->sc_cold = false;
}

static int
acpicpu_object(ACPI_HANDLE hdl, struct acpicpu_object *ao)
{
	ACPI_OBJECT *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	rv = acpi_eval_struct(hdl, NULL, &buf);

	if (ACPI_FAILURE(rv))
		return 1;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PROCESSOR) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Processor.ProcId > (uint32_t)maxcpus) {
		rv = AE_LIMIT;
		goto out;
	}

	KDASSERT((uint64_t)obj->Processor.PblkAddress < UINT32_MAX);

	if (ao != NULL) {
		ao->ao_procid = obj->Processor.ProcId;
		ao->ao_pblklen = obj->Processor.PblkLength;
		ao->ao_pblkaddr = obj->Processor.PblkAddress;
	}

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return ACPI_FAILURE(rv) ? 1 : 0;
}

static cpuid_t
acpicpu_id(uint32_t id)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {

		if (id == ci->ci_acpiid)
			return id;
	}

	return 0xFFFFFF;
}

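/*
 * Query the processor capabilities by evaluating both the legacy _PDC
 * and the newer _OSC method. The capabilities word from _OSC is used
 * only if the firmware did not flag an error in the _OSC status bits.
 */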
static uint32_t
acpicpu_cap(struct acpicpu_softc *sc)
{
	uint32_t cap[3] = { 0 };
	ACPI_STATUS rv;
	int err;

	/*
	 * Set machine-dependent processor capabilities.
	 *
	 * The _PDC was deprecated in ACPI 3.0 in favor of the _OSC,
	 * but firmware may expect that we evaluate it nevertheless.
	 */
	rv = acpicpu_cap_pdc(sc->sc_node->ad_handle);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_error_dev(sc->sc_dev, "failed to evaluate _PDC: "
		    "%s\n", AcpiFormatException(rv));

	rv = acpicpu_cap_osc(sc->sc_node->ad_handle, cap);

	if (ACPI_FAILURE(rv) && rv != AE_NOT_FOUND)
		aprint_error_dev(sc->sc_dev, "failed to evaluate _OSC: "
		    "%s\n", AcpiFormatException(rv));

	if (ACPI_SUCCESS(rv)) {

		err = cap[0] & ~__BIT(0);

		if (err != 0) {
			aprint_error_dev(sc->sc_dev, "errors in "
			    "_OSC: %s\n", acpicpu_cap_oscerr(err));
			cap[2] = 0;
		}
	}

	return cap[2];
}

static ACPI_OBJECT *
acpicpu_cap_init(void)
{
	static uint32_t cap[3];
	static ACPI_OBJECT obj;

	cap[0] = ACPICPU_PDC_REVID;
	cap[1] = 1;
	cap[2] = acpicpu_md_cap();

	obj.Type = ACPI_TYPE_BUFFER;
	obj.Buffer.Length = sizeof(cap);
	obj.Buffer.Pointer = (uint8_t *)cap;

	return &obj;
}

static ACPI_STATUS
acpicpu_cap_pdc(ACPI_HANDLE hdl)
{
	ACPI_OBJECT_LIST arg_list;

	arg_list.Count = 1;
	arg_list.Pointer = acpicpu_cap_init();

	return AcpiEvaluateObject(hdl, "_PDC", &arg_list, NULL);
}

static ACPI_STATUS
acpicpu_cap_osc(ACPI_HANDLE hdl, uint32_t *val)
{
	ACPI_OBJECT_LIST arg_list;
	ACPI_OBJECT *cap, *obj;
	ACPI_OBJECT arg[4];
	ACPI_BUFFER buf;
	ACPI_STATUS rv;

	/* Intel. */
	static uint8_t cpu_oscuuid[16] = {
		0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 0xBE, 0x47,
		0x9E, 0xBD, 0xD8, 0x70, 0x58, 0x71, 0x39, 0x53
	};

	cap = acpicpu_cap_init();

	arg_list.Count = 4;
	arg_list.Pointer = arg;

	arg[0].Type = ACPI_TYPE_BUFFER;
	arg[0].Buffer.Length = sizeof(cpu_oscuuid);
	arg[0].Buffer.Pointer = cpu_oscuuid;

	arg[1].Type = ACPI_TYPE_INTEGER;
	arg[1].Integer.Value = ACPICPU_PDC_REVID;

	arg[2].Type = ACPI_TYPE_INTEGER;
	arg[2].Integer.Value = cap->Buffer.Length / sizeof(uint32_t);

	arg[3] = *cap;

	buf.Pointer = NULL;
	buf.Length = ACPI_ALLOCATE_LOCAL_BUFFER;

	rv = AcpiEvaluateObject(hdl, "_OSC", &arg_list, &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Buffer.Length != cap->Buffer.Length) {
		rv = AE_BUFFER_OVERFLOW;
		goto out;
	}

	(void)memcpy(val, obj->Buffer.Pointer, obj->Buffer.Length);

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

static const char *
acpicpu_cap_oscerr(uint32_t err)
{

	KASSERT((err & __BIT(0)) == 0);

	if ((err & __BIT(1)) != 0)
		return "_OSC failure";

	if ((err & __BIT(2)) != 0)
		return "unrecognized UUID";

	if ((err & __BIT(3)) != 0)
		return "unrecognized revision";

	if ((err & __BIT(4)) != 0)
		return "capabilities masked";

	return "unknown error";
}

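/*
 * ACPI notify handler: dispatch C-, P-, and T-state change notifications
 * to the matching state-specific callback via AcpiOsExecute().
 */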
static void
acpicpu_notify(ACPI_HANDLE hdl, uint32_t evt, void *aux)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if (sc->sc_cold != false)
		return;

	switch (evt) {

	case ACPICPU_C_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_C) == 0)
			return;

		func = acpicpu_cstate_callback;
		break;

	case ACPICPU_P_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
			return;

		func = acpicpu_pstate_callback;
		break;

	case ACPICPU_T_NOTIFY:

		if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
			return;

		func = acpicpu_tstate_callback;
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown notify: 0x%02X\n", evt);
		return;
	}

	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
}

static bool
acpicpu_suspend(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_suspend(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_suspend(self);

	sc->sc_cold = true;

	return true;
}

static bool
acpicpu_resume(device_t self, const pmf_qual_t *qual)
{
	struct acpicpu_softc *sc = device_private(self);

	sc->sc_cold = false;

	if ((sc->sc_flags & ACPICPU_FLAG_C) != 0)
		(void)acpicpu_cstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
		(void)acpicpu_pstate_resume(self);

	if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
		(void)acpicpu_tstate_resume(self);

	return true;
}

#ifdef _MODULE

MODULE(MODULE_CLASS_DRIVER, acpicpu, NULL);
CFDRIVER_DECL(acpicpu, DV_DULL, NULL);

static int acpicpuloc[] = { -1 };
extern struct cfattach acpicpu_ca;

static struct cfparent acpiparent = {
	"acpinodebus", NULL, DVUNIT_ANY
};

static struct cfdata acpicpu_cfdata[] = {
	{
		.cf_name = "acpicpu",
		.cf_atname = "acpicpu",
		.cf_unit = 0,
		.cf_fstate = FSTATE_STAR,
		.cf_loc = acpicpuloc,
		.cf_flags = 0,
		.cf_pspec = &acpiparent,
	},

	{ NULL, NULL, 0, 0, NULL, 0, NULL }
};

static int
acpicpu_modcmd(modcmd_t cmd, void *context)
{
	int err;

	switch (cmd) {

	case MODULE_CMD_INIT:

		err = config_cfdriver_attach(&acpicpu_cd);

		if (err != 0)
			return err;

		err = config_cfattach_attach("acpicpu", &acpicpu_ca);

		if (err != 0) {
			config_cfdriver_detach(&acpicpu_cd);
			return err;
		}

		err = config_cfdata_attach(acpicpu_cfdata, 1);

		if (err != 0) {
			config_cfattach_detach("acpicpu", &acpicpu_ca);
			config_cfdriver_detach(&acpicpu_cd);
			return err;
		}

		return 0;

	case MODULE_CMD_FINI:

		err = config_cfdata_detach(acpicpu_cfdata);

		if (err != 0)
			return err;

		config_cfattach_detach("acpicpu", &acpicpu_ca);
		config_cfdriver_detach(&acpicpu_cd);

		return 0;

	default:
		return ENOTTY;
	}
}

#endif /* _MODULE */